/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * peripheral.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels.  So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in the following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * A memory-to-peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	On burst request from peripheral
 *		Destination burst from DMAC to peripheral
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem-to-mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.  Unsupported by PL080S.
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"
#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;
/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 * @pl080s: whether this version is a PL080S, which has separate register and
 *	LLI word for transfer size.
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	u32 max_transfer_size;
};
/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};
/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 *	channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 *	world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};
/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};
/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
};
/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};
/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 *	fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
	spinlock_t lock;
};
/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8
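
/*
 * Illustration only (not part of the driver): with the LLI layout above, one
 * PL080 LLI is PL080_LLI_WORDS * sizeof(u32) = 16 bytes and one PL080S LLI is
 * 32 bytes, so the per-transfer buffer carved out of the DMA pool is
 * MAX_NUM_TSFR_LLIS times that (8 KiB or 16 KiB).  A minimal sketch:
 */
static inline size_t pl08x_example_lli_bytes(bool pl080s)
{
	/* 4 words (src, dst, lli, cctl), or 8 with the extra PL080S CCTL2 word */
	return (pl080s ? PL080S_LLI_WORDS : PL080_LLI_WORDS) * sizeof(u32);
}

static inline size_t pl08x_example_lli_buffer_bytes(bool pl080s)
{
	/* One dma_pool element holds up to MAX_NUM_TSFR_LLIS chained LLIs */
	return MAX_NUM_TSFR_LLIS * pl08x_example_lli_bytes(pl080s);
}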
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}
/*
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}
static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->reg_config);
	return val & PL080_CONFIG_ACTIVE;
}
static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
	writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
	writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);

	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	writel(ccfg, phychan->reg_config);
}
/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->reg_config);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->reg_config);

	writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}
/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->reg_config);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->reg_config);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->reg_config);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->reg_config);
}
/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->reg_config);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->reg_config);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	cctl &= PL080_CONTROL_SWIDTH_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;

	cctl &= PL080_CONTROL_SWIDTH_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	const u32 *llis_va, *llis_va_limit;
	struct pl08x_phy_chan *ch;
	dma_addr_t llis_bus;
	struct pl08x_txd *txd;
	u32 llis_max_words;
	size_t bytes;
	u32 clli;

	bytes = 0;
	ch = plchan->phychan;
	txd = plchan->at;

	if (!ch || !txd)
		return 0;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

	/* First get the remaining bytes in the active transfer */
	if (pl08x->vd->pl080s)
		bytes = get_bytes_in_cctl_pl080s(
				readl(ch->base + PL080_CH_CONTROL),
				readl(ch->base + PL080S_CH_CONTROL2));
	else
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

	if (!clli)
		return bytes;

	llis_va = txd->llis_va;
	llis_bus = txd->llis_bus;

	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
	BUG_ON(clli < llis_bus || clli >= llis_bus +
						sizeof(u32) * llis_max_words);

	/*
	 * Locate the next LLI - as this is an array,
	 * it's simple maths to find.
	 */
	llis_va += (clli - llis_bus) / sizeof(u32);

	llis_va_limit = llis_va + llis_max_words;

	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
		if (pl08x->vd->pl080s)
			bytes += get_bytes_in_cctl_pl080s(
						llis_va[PL080_LLI_CCTL],
						llis_va[PL080S_LLI_CCTL2]);
		else
			bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);

		/*
		 * A LLI pointer of 0 terminates the LLI list
		 */
		if (!llis_va[PL080_LLI_LLI])
			break;
	}

	return bytes;
}
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}
/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}
/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}
static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
				     struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}
/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};
/*
 * Autoselect a master bus to use for the transfer.  Slave will be the chosen as
 * victim in case src & dest are not similarly aligned. i.e. If after aligning
 * masters address with width requirements of transfer (by sending few byte by
 * byte data), slave is still not aligned, then its width will be reduced to
 * BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}
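
/*
 * Illustration only: for a memory-to-peripheral transfer the peripheral
 * address does not increment, so by the rule above the peripheral side is
 * picked as the master bus and the memory side is the "slave" bus whose
 * width may later be reduced.  A hypothetical sketch of that one case:
 */
static void pl08x_example_mem_to_dev_master_choice(void)
{
	u32 cctl = PL080_CONTROL_SRC_INCR;	/* memory side increments, FIFO side does not */
	bool dst_is_master = !(cctl & PL080_CONTROL_DST_INCR);

	(void)dst_is_master;			/* true: the fixed-address bus is the master */
}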
/*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    int num_llis, int len, u32 cctl, u32 cctl2)
{
	u32 offset = num_llis * pl08x->lli_words;
	u32 *llis_va = bd->txd->llis_va + offset;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	/* Advance the offset to next LLI. */
	offset += pl08x->lli_words;

	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
	llis_va[PL080_LLI_CCTL] = cctl;
	if (pl08x->vd->pl080s)
		llis_va[PL080S_LLI_CCTL2] = cctl2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
			struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
			int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
	(*total_bytes) += len;
}
#ifdef VERBOSE_DEBUG
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
			   const u32 *llis_va, int num_llis)
{
	int i;

	if (pl08x->vd->pl080s) {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s  %-10s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL],
				llis_va[PL080S_LLI_CCTL2]);
			llis_va += pl08x->lli_words;
		}
	} else {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s  %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL]);
			llis_va += pl08x->lli_words;
		}
	}
}
#else
static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
				  const u32 *llis_va, int num_llis) {}
#endif
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	u32 *llis_va, *last_lli;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, Only one sg will be passed by user driver, with
		 *   memory address and zero length. We pass this to controller
		 *   and after the transfer it will receive the last burst
		 *   request from peripheral and so transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, Obviously as DMA controller doesn't know when a lli's
		 *   transfer gets over, it can't load next lli. So in this
		 *   case, there has to be an assumption that only one lli is
		 *   supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
			    (bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
					0, cctl, 0);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
				num_llis++, &total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
						pl08x->vd->max_transfer_size;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
						lli_len, cctl, tsize);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(pl08x, &bd, &cctl,
					bd.remainder, num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
	/* The final LLI terminates the LLI. */
	last_lli[PL080_LLI_LLI] = 0;
	/* The final LLI element shall also fire an interrupt. */
	last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;

	pl08x_dump_lli(pl08x, llis_va, num_llis);

	return num_llis;
}
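
/*
 * Illustration only: a small worked example of the splitting above.  Assume
 * mbus->buswidth == 4, the master address % 4 == 2, the source bus width is
 * 4 bytes and max_transfer_size is 0xfff elements; none of these values come
 * from real platform data.
 */
static void pl08x_example_lli_split(void)
{
	size_t remainder = 4100;		/* total bytes in this sg */
	size_t early_bytes = 4 - 2;		/* byte-wide LLI until the master bus aligns */
	size_t max_bytes_per_lli = 4 * 0xfff;	/* srcbus.buswidth * vd->max_transfer_size */
	size_t lli_len, tsize;

	remainder -= early_bytes;		/* 4098 bytes left */

	lli_len = min_t(size_t, remainder, max_bytes_per_lli);
	lli_len = (lli_len / 4) * 4;		/* round down to the widest bus width: 4096 */
	tsize = lli_len / 4;			/* 1024 elements encoded in this LLI's cctl */

	remainder -= lli_len;			/* 2 odd bytes remain, sent as one final byte-wide LLI */

	(void)tsize;
	(void)remainder;
}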
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->vd.tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
	}
	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	if (!plchan->slave)
		pl08x_unmap_buffers(txd);

	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}
/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{ .burstwords = 256,	.reg = PL080_BSIZE_256, },
	{ .burstwords = 128,	.reg = PL080_BSIZE_128, },
	{ .burstwords = 64,	.reg = PL080_BSIZE_64, },
	{ .burstwords = 32,	.reg = PL080_BSIZE_32, },
	{ .burstwords = 16,	.reg = PL080_BSIZE_16, },
	{ .burstwords = 8,	.reg = PL080_BSIZE_8, },
	{ .burstwords = 4,	.reg = PL080_BSIZE_4, },
	{ .burstwords = 1,	.reg = PL080_BSIZE_1, },
};
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}
static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}
static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}
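
/*
 * Illustration only: because burst_sizes[] is ordered from largest to
 * smallest, the first entry not exceeding the client's maxburst wins.
 */
static void pl08x_example_burst_mapping(void)
{
	u32 reg20 = pl08x_burst(20);	/* first entry <= 20 is 16: PL080_BSIZE_16 */
	u32 reg3  = pl08x_burst(3);	/* first entry <= 3 is 1: PL080_BSIZE_1 */

	(void)reg20;
	(void)reg3;
}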
static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (config->device_fc && pl08x->vd->pl080s) {
		dev_err(&pl08x->adev->dev,
			"%s: PL080S does not support peripheral flow control\n",
			__func__);
		return -EINVAL;
	}

	plchan->cfg = *config;

	return 0;
}
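
/*
 * For reference, a client driver hands this channel its runtime parameters
 * through the generic dmaengine wrapper; on this driver the call lands in
 * dma_set_runtime_config() above.  The address, width and burst values below
 * are placeholders, not values taken from this file.
 */
static int pl08x_example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,				/* peripheral FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,	/* 8-byte widths are rejected */
		.dst_maxburst = 4,				/* e.g. half the peripheral FIFO depth */
		.device_fc = false,				/* DMAC flow control */
	};

	return dmaengine_slave_config(chan, &cfg);
}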
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
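
/*
 * Illustration only: a hypothetical dmaengine client driving the memcpy path
 * prepared above.  It assumes a kernel where the dmaengine_prep_dma_memcpy()
 * wrapper is available; error handling and channel teardown are omitted.
 */
static int pl08x_example_memcpy(struct dma_chan *chan, dma_addr_t dst,
				dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;		/* e.g. pl08x_fill_llis_for_desc() found no LLI memory */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* completion arrives via the TC interrupt */

	return 0;
}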
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
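
/*
 * Illustration only: a hypothetical client submitting a scatter/gather TX on
 * a PL08x slave channel.  It assumes the scatterlist was already mapped with
 * dma_map_sg() and the channel was configured as in the earlier sketch.
 */
static int pl08x_example_submit_tx(struct dma_chan *chan,
				   struct scatterlist *sgl, unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;	/* no txd/LLI memory, or the mux signal was refused */

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* ends up in pl08x_issue_pending() */

	return dma_submit_error(cookie) ? -EINVAL : 0;
}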
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			pl08x_phy_free(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_desc_free(&plchan->at->vd);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return ret;
}
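
/*
 * Illustration only: the generic dmaengine calls a client would use; each one
 * reaches pl08x_control() above with the corresponding dma_ctrl_cmd.
 */
static void pl08x_example_pause_and_abort(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* DMA_PAUSE: set HALT, wait for the FIFO to drain */
	dmaengine_resume(chan);		/* DMA_RESUME: clear HALT again */
	dmaengine_terminate_all(chan);	/* DMA_TERMINATE_ALL: stop, free mux and LLIs */
}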
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
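
/*
 * Illustration only: how a client would use the filter above.  "uart0_tx" is
 * a made-up name; it must match a bus_id from the board's channel data.
 */
static struct dma_chan *pl08x_example_request_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
}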
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off.  That will save some
 * power.  Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
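
/*
 * Illustration only: roughly what board-side platform data for this driver
 * looks like.  Only fields this file actually dereferences are shown
 * (bus_id, addr, periph_buses, the mux callbacks, lli_buses/mem_buses); the
 * names, address and trivial mux are placeholders, and the exact layout of
 * struct pl08x_channel_data should be checked against <linux/amba/pl08x.h>.
 */
static struct pl08x_channel_data pl08x_example_slave_channels[] = {
	{
		.bus_id = "uart0_tx",
		.addr = 0x80120000,		/* peripheral FIFO address */
		.periph_buses = PL08X_AHB2,
	},
};

static int pl08x_example_get_xfer_signal(const struct pl08x_channel_data *cd)
{
	return 0;			/* trivial mux: one fixed request line */
}

static void pl08x_example_put_xfer_signal(const struct pl08x_channel_data *cd,
					  int signal)
{
}

static struct pl08x_platform_data pl08x_example_pd = {
	.slave_channels = pl08x_example_slave_channels,
	.num_slave_channels = ARRAY_SIZE(pl08x_example_slave_channels),
	.get_xfer_signal = pl08x_example_get_xfer_signal,
	.put_xfer_signal = pl08x_example_put_xfer_signal,
	.lli_buses = PL08X_AHB1,
	.mem_buses = PL08X_AHB1,
};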
1797 * Initialise the DMAC memcpy/slave channels.
1798 * Make a local wrapper to hold required data
1800 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data
*pl08x
,
1801 struct dma_device
*dmadev
, unsigned int channels
, bool slave
)
1803 struct pl08x_dma_chan
*chan
;
1806 INIT_LIST_HEAD(&dmadev
->channels
);
1809 * Register as many many memcpy as we have physical channels,
1810 * we won't always be able to use all but the code will have
1811 * to cope with that situation.
1813 for (i
= 0; i
< channels
; i
++) {
1814 chan
= kzalloc(sizeof(*chan
), GFP_KERNEL
);
1816 dev_err(&pl08x
->adev
->dev
,
1817 "%s no memory for channel\n", __func__
);
1822 chan
->state
= PL08X_CHAN_IDLE
;
1826 chan
->cd
= &pl08x
->pd
->slave_channels
[i
];
1827 pl08x_dma_slave_init(chan
);
1829 chan
->cd
= &pl08x
->pd
->memcpy_channel
;
1830 chan
->name
= kasprintf(GFP_KERNEL
, "memcpy%d", i
);
1836 dev_dbg(&pl08x
->adev
->dev
,
1837 "initialize virtual channel \"%s\"\n",
1840 chan
->vc
.desc_free
= pl08x_desc_free
;
1841 vchan_init(&chan
->vc
, dmadev
);
1843 dev_info(&pl08x
->adev
->dev
, "initialized %d virtual %s channels\n",
1844 i
, slave
? "slave" : "memcpy");
1848 static void pl08x_free_virtual_channels(struct dma_device
*dmadev
)
1850 struct pl08x_dma_chan
*chan
= NULL
;
1851 struct pl08x_dma_chan
*next
;
1853 list_for_each_entry_safe(chan
,
1854 next
, &dmadev
->channels
, vc
.chan
.device_node
) {
1855 list_del(&chan
->vc
.chan
.device_node
);
1860 #ifdef CONFIG_DEBUG_FS
1861 static const char *pl08x_state_str(enum pl08x_dma_chan_state state
)
1864 case PL08X_CHAN_IDLE
:
1866 case PL08X_CHAN_RUNNING
:
1868 case PL08X_CHAN_PAUSED
:
1870 case PL08X_CHAN_WAITING
:
1875 return "UNKNOWN STATE";
1878 static int pl08x_debugfs_show(struct seq_file
*s
, void *data
)
1880 struct pl08x_driver_data
*pl08x
= s
->private;
1881 struct pl08x_dma_chan
*chan
;
1882 struct pl08x_phy_chan
*ch
;
1883 unsigned long flags
;
1886 seq_printf(s
, "PL08x physical channels:\n");
1887 seq_printf(s
, "CHANNEL:\tUSER:\n");
1888 seq_printf(s
, "--------\t-----\n");
1889 for (i
= 0; i
< pl08x
->vd
->channels
; i
++) {
1890 struct pl08x_dma_chan
*virt_chan
;
1892 ch
= &pl08x
->phy_chans
[i
];
1894 spin_lock_irqsave(&ch
->lock
, flags
);
1895 virt_chan
= ch
->serving
;
1897 seq_printf(s
, "%d\t\t%s%s\n",
1899 virt_chan
? virt_chan
->name
: "(none)",
1900 ch
->locked
? " LOCKED" : "");
1902 spin_unlock_irqrestore(&ch
->lock
, flags
);
1905 seq_printf(s
, "\nPL08x virtual memcpy channels:\n");
1906 seq_printf(s
, "CHANNEL:\tSTATE:\n");
1907 seq_printf(s
, "--------\t------\n");
1908 list_for_each_entry(chan
, &pl08x
->memcpy
.channels
, vc
.chan
.device_node
) {
1909 seq_printf(s
, "%s\t\t%s\n", chan
->name
,
1910 pl08x_state_str(chan
->state
));
1913 seq_printf(s
, "\nPL08x virtual slave channels:\n");
1914 seq_printf(s
, "CHANNEL:\tSTATE:\n");
1915 seq_printf(s
, "--------\t------\n");
1916 list_for_each_entry(chan
, &pl08x
->slave
.channels
, vc
.chan
.device_node
) {
1917 seq_printf(s
, "%s\t\t%s\n", chan
->name
,
1918 pl08x_state_str(chan
->state
));
1924 static int pl08x_debugfs_open(struct inode
*inode
, struct file
*file
)
1926 return single_open(file
, pl08x_debugfs_show
, inode
->i_private
);
1929 static const struct file_operations pl08x_debugfs_operations
= {
1930 .open
= pl08x_debugfs_open
,
1932 .llseek
= seq_lseek
,
1933 .release
= single_release
,
1936 static void init_pl08x_debugfs(struct pl08x_driver_data
*pl08x
)
1938 /* Expose a simple debugfs interface to view all clocks */
1939 (void) debugfs_create_file(dev_name(&pl08x
->adev
->dev
),
1940 S_IFREG
| S_IRUGO
, NULL
, pl08x
,
1941 &pl08x_debugfs_operations
);
1945 static inline void init_pl08x_debugfs(struct pl08x_driver_data
*pl08x
)
1950 static int pl08x_probe(struct amba_device
*adev
, const struct amba_id
*id
)
1952 struct pl08x_driver_data
*pl08x
;
1953 const struct vendor_data
*vd
= id
->data
;
1958 ret
= amba_request_regions(adev
, NULL
);
1962 /* Create the driver state holder */
1963 pl08x
= kzalloc(sizeof(*pl08x
), GFP_KERNEL
);
1969 /* Initialize memcpy engine */
1970 dma_cap_set(DMA_MEMCPY
, pl08x
->memcpy
.cap_mask
);
1971 pl08x
->memcpy
.dev
= &adev
->dev
;
1972 pl08x
->memcpy
.device_alloc_chan_resources
= pl08x_alloc_chan_resources
;
1973 pl08x
->memcpy
.device_free_chan_resources
= pl08x_free_chan_resources
;
1974 pl08x
->memcpy
.device_prep_dma_memcpy
= pl08x_prep_dma_memcpy
;
1975 pl08x
->memcpy
.device_prep_dma_interrupt
= pl08x_prep_dma_interrupt
;
1976 pl08x
->memcpy
.device_tx_status
= pl08x_dma_tx_status
;
1977 pl08x
->memcpy
.device_issue_pending
= pl08x_issue_pending
;
1978 pl08x
->memcpy
.device_control
= pl08x_control
;
1980 /* Initialize slave engine */
1981 dma_cap_set(DMA_SLAVE
, pl08x
->slave
.cap_mask
);
1982 pl08x
->slave
.dev
= &adev
->dev
;
1983 pl08x
->slave
.device_alloc_chan_resources
= pl08x_alloc_chan_resources
;
1984 pl08x
->slave
.device_free_chan_resources
= pl08x_free_chan_resources
;
1985 pl08x
->slave
.device_prep_dma_interrupt
= pl08x_prep_dma_interrupt
;
1986 pl08x
->slave
.device_tx_status
= pl08x_dma_tx_status
;
1987 pl08x
->slave
.device_issue_pending
= pl08x_issue_pending
;
1988 pl08x
->slave
.device_prep_slave_sg
= pl08x_prep_slave_sg
;
1989 pl08x
->slave
.device_control
= pl08x_control
;
1991 /* Get the platform data */
1992 pl08x
->pd
= dev_get_platdata(&adev
->dev
);
1994 dev_err(&adev
->dev
, "no platform data supplied\n");
1996 goto out_no_platdata
;
1999 /* Assign useful pointers to the driver state */
2003 /* By default, AHB1 only. If dualmaster, from platform */
2004 pl08x
->lli_buses
= PL08X_AHB1
;
2005 pl08x
->mem_buses
= PL08X_AHB1
;
2006 if (pl08x
->vd
->dualmaster
) {
2007 pl08x
->lli_buses
= pl08x
->pd
->lli_buses
;
2008 pl08x
->mem_buses
= pl08x
->pd
->mem_buses
;
2012 pl08x
->lli_words
= PL080S_LLI_WORDS
;
2014 pl08x
->lli_words
= PL080_LLI_WORDS
;
2015 tsfr_size
= MAX_NUM_TSFR_LLIS
* pl08x
->lli_words
* sizeof(u32
);
2017 /* A DMA memory pool for LLIs, align on 1-byte boundary */
2018 pl08x
->pool
= dma_pool_create(DRIVER_NAME
, &pl08x
->adev
->dev
,
2019 tsfr_size
, PL08X_ALIGN
, 0);
2022 goto out_no_lli_pool
;
2025 pl08x
->base
= ioremap(adev
->res
.start
, resource_size(&adev
->res
));
2028 goto out_no_ioremap
;
2031 /* Turn on the PL08x */
2032 pl08x_ensure_on(pl08x
);
2034 /* Attach the interrupt handler */
2035 writel(0x000000FF, pl08x
->base
+ PL080_ERR_CLEAR
);
2036 writel(0x000000FF, pl08x
->base
+ PL080_TC_CLEAR
);
2038 ret
= request_irq(adev
->irq
[0], pl08x_irq
, IRQF_DISABLED
,
2039 DRIVER_NAME
, pl08x
);
2041 dev_err(&adev
->dev
, "%s failed to request interrupt %d\n",
2042 __func__
, adev
->irq
[0]);
2046 /* Initialize physical channels */
2047 pl08x
->phy_chans
= kzalloc((vd
->channels
* sizeof(*pl08x
->phy_chans
)),
2049 if (!pl08x
->phy_chans
) {
2050 dev_err(&adev
->dev
, "%s failed to allocate "
2051 "physical channel holders\n",
2054 goto out_no_phychans
;
2057 for (i
= 0; i
< vd
->channels
; i
++) {
2058 struct pl08x_phy_chan
*ch
= &pl08x
->phy_chans
[i
];
2061 ch
->base
= pl08x
->base
+ PL080_Cx_BASE(i
);
2062 ch
->reg_config
= ch
->base
+ vd
->config_offset
;
2063 spin_lock_init(&ch
->lock
);
2066 * Nomadik variants can have channels that are locked
2067 * down for the secure world only. Lock up these channels
2068 * by perpetually serving a dummy virtual channel.
2073 val
= readl(ch
->reg_config
);
2074 if (val
& (PL080N_CONFIG_ITPROT
| PL080N_CONFIG_SECPROT
)) {
2075 dev_info(&adev
->dev
, "physical channel %d reserved for secure access only\n", i
);
2080 dev_dbg(&adev
->dev
, "physical channel %d is %s\n",
2081 i
, pl08x_phy_channel_busy(ch
) ? "BUSY" : "FREE");
2084 /* Register as many memcpy channels as there are physical channels */
2085 ret
= pl08x_dma_init_virtual_channels(pl08x
, &pl08x
->memcpy
,
2086 pl08x
->vd
->channels
, false);
2088 dev_warn(&pl08x
->adev
->dev
,
2089 "%s failed to enumerate memcpy channels - %d\n",
2093 pl08x
->memcpy
.chancnt
= ret
;
2095 /* Register slave channels */
2096 ret
= pl08x_dma_init_virtual_channels(pl08x
, &pl08x
->slave
,
2097 pl08x
->pd
->num_slave_channels
, true);
2099 dev_warn(&pl08x
->adev
->dev
,
2100 "%s failed to enumerate slave channels - %d\n",
2104 pl08x
->slave
.chancnt
= ret
;
2106 ret
= dma_async_device_register(&pl08x
->memcpy
);
2108 dev_warn(&pl08x
->adev
->dev
,
2109 "%s failed to register memcpy as an async device - %d\n",
2111 goto out_no_memcpy_reg
;
2114 ret
= dma_async_device_register(&pl08x
->slave
);
2116 dev_warn(&pl08x
->adev
->dev
,
2117 "%s failed to register slave as an async device - %d\n",
2119 goto out_no_slave_reg
;
2122 amba_set_drvdata(adev
, pl08x
);
2123 init_pl08x_debugfs(pl08x
);
2124 dev_info(&pl08x
->adev
->dev
, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2125 amba_part(adev
), pl08x
->vd
->pl080s
? "s" : "", amba_rev(adev
),
2126 (unsigned long long)adev
->res
.start
, adev
->irq
[0]);
2131 dma_async_device_unregister(&pl08x
->memcpy
);
2133 pl08x_free_virtual_channels(&pl08x
->slave
);
2135 pl08x_free_virtual_channels(&pl08x
->memcpy
);
2137 kfree(pl08x
->phy_chans
);
2139 free_irq(adev
->irq
[0], pl08x
);
2141 iounmap(pl08x
->base
);
2143 dma_pool_destroy(pl08x
->pool
);
2148 amba_release_regions(adev
);
/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.dualmaster = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_nomadik = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl080s = {
	.config_offset = PL080S_CH_CONFIG,
	.channels = 8,
	.pl080s = true,
	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl081 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 2,
	.dualmaster = false,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct amba_id pl08x_ids[] = {
	/* Samsung PL080S variant */
	{
		.data	= &vendor_pl080s,
	},
	/* PL080 */
	{
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       "failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);