/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "dma.h"
/*
 * DMA hardware requires each descriptor ring to be 8 kB aligned and to fit
 * within a single contiguous 8 kB physical address range.
 */
#define D64RINGALIGN_BITS	13
#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define	D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define	D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))
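
/*
 * Editor's note (illustrative, not in the original source): each dma64desc
 * defined below is four __le32 words, i.e. 16 bytes, so the 8 kB maximum
 * ring size works out to D64MAXDD = 8192 / 16 = 512 descriptors per ring.
 */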
/* transmit channel control */
#define	D64_XC_XE		0x00000001	/* transmit enable */
#define	D64_XC_SE		0x00000002	/* transmit suspend request */
#define	D64_XC_LE		0x00000004	/* loopback enable */
#define	D64_XC_FL		0x00000010	/* flush request */
#define	D64_XC_PD		0x00000800	/* parity check disable */
#define	D64_XC_AE		0x00030000	/* address extension bits */
#define	D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define	D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */
/* transmit channel status */
#define	D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define	D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define	D64_XS0_XS_SHIFT	28
#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define	D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define	D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define	D64_XS1_XE_SHIFT	28
#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define	D64_XS1_XE_COREE	0x50000000	/* core error */
/* receive channel control */
/* receive enable */
#define	D64_RC_RE		0x00000001
/* receive frame offset */
#define	D64_RC_RO_MASK		0x000000fe
#define	D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define	D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define	D64_RC_SH		0x00000200
/* overflow continue */
#define	D64_RC_OC		0x00000400
/* parity check disable */
#define	D64_RC_PD		0x00000800
/* address extension bits */
#define	D64_RC_AE		0x00030000
#define	D64_RC_AE_SHIFT		16
/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)

/* receive descriptor table pointer */
#define	D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */
/* receive channel status */
#define	D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define	D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define	D64_RS0_RS_SHIFT	28
#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define	D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define	D64_RS1_RE_SHIFT	28
#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define	D64_RS1_RE_COREE	0x50000000	/* core error */
/* fifo address */
#define	D64_FA_OFF_MASK		0xffff	/* offset */
#define	D64_FA_SEL_MASK		0xf0000	/* select */
#define	D64_FA_SEL_SHIFT	16
#define	D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define	D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define	D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define	D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define	D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define	D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define	D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define	D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define	D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define	D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */
/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define	D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define	D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define	D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define	D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count. real data len must be <= 16 KB */
#define	D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define	D64_CTRL2_AE		0x00030000
#define	D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define	D64_CTRL_CORE_MASK	0x0ff00000
#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */
/*
 * packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */
#define BCMEXTRAHDROOM 172
#if defined(DEBUG)
#define	DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printk args; \
	} while (0)
#define	DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printk args; \
	} while (0)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* defined(DEBUG) */

#define	DMA_NONE(args)
#define	MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
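
/*
 * Editor's note (illustrative, not in the original source): with the 16-byte
 * struct dma64desc defined below, these macros convert as, e.g.:
 *
 *	B2I(0x50, struct dma64desc) == 5	(byte offset -> ring index)
 *	I2B(5, struct dma64desc)    == 0x50	(ring index -> byte offset)
 */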
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31		/* address[63] */
/*
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;		/* misc control bits & bufcount */
	__le32 ctrl2;		/* buffer count and address extension */
	__le32 addrlow;		/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};
/* dma engine software state */
struct dma_info {
	struct dma_pub dma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	struct pci_dev *pbus;	/* bus handle */

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	struct dma64regs __iomem *d64txregs;
	/* 64-bit dma rx engine registers */
	struct dma64regs __iomem *d64rxregs;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr
				 * register is not just an index, it needs
				 * all 13 bits to be an offset from the addr
				 * register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned
				 * descriptors
				 */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper
				 * stack, e.g. some rx pkt buffers will be
				 * bridged to tx side without byte copying.
				 * The extra headroom needs to be large enough
				 * to fit txheader needs. Some dongle driver may
				 * not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	u32 ddoffsetlow;
	/*   high 32 bits */
	u32 ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	u32 dataoffsetlow;
	/*   high 32 bits */
	u32 dataoffsethigh;
	/* descriptor base need to be aligned or not */
	bool aligndesc_4k;
};
/*
 * default dma message level (if input msg_level
 * pointer is null in dma_attach())
 */
static uint dma_msg_level;
/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}
static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}
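
/*
 * Editor's note (illustrative, not in the original source): the XOR fold in
 * parity32() computes the parity of all 32 bits. E.g. if the four descriptor
 * words XOR to 0x00000003 (two 1-bits) the result is 0 (even parity); if
 * they XOR to 0x00000001 the result is 1 (odd parity), in which case
 * dma64_dd_upd() below sets D64_CTRL2_PARITY so the stored descriptor has
 * even parity overall.
 */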
/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1);	/* faster than %, but n must be power of 2 */
}
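
/*
 * Editor's note (illustrative, not in the original source): for a ring of
 * n = 512 descriptors, xxd(513, 512) == 513 & 511 == 1, the same wrap that
 * 513 % 512 would give, computed with a single AND.
 */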
static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}
static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}
static uint nextrxd(struct dma_info *di, uint i)
{
	return rxd(di, i + 1);
}
static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t - h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t - h);
}
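
/*
 * Editor's note (illustrative, not in the original source): with ntxd = 256,
 * txin = 250 and txout = 4, ntxdactive() returns (4 - 250) & 255 == 10, the
 * number of descriptors posted but not yet reclaimed even though the ring
 * indexes have wrapped.
 */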
static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return 0;
	}

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control, control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
{
	u32 w;

	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->d64txregs))
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
				   "AE set\n", di->name));
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->d64rxregs))
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
				   "AE set\n", di->name));
		return true;
	}

	return false;
}
static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(&di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64txregs->addrlow);
		if (addrl != 0)
			return false;
	} else if (di->d64rxregs != NULL) {
		W_REG(&di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64rxregs->addrlow);
		if (addrl != 0)
			return false;
	}
	return true;
}
/*
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return pci_alloc_consistent(pdev, size, pap);
}
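
/*
 * Editor's note (illustrative, not in the original source): with 4 kB pages
 * and align_bits = 13 (8 kB), PAGE_SIZE is not a multiple of the requested
 * alignment, so the allocation is padded by a further 8 kB; the caller can
 * then round the returned address up to an 8 kB boundary and still have a
 * full ring's worth of space left.
 */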
static u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;

	while (size >>= 1)
		bitpos++;
	return bitpos;
}
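
/*
 * Editor's note (illustrative, not in the original source): for a ring of
 * 256 descriptors (256 * 16 = 4096 bytes), dma_align_sizetobits(4096)
 * returns 12, i.e. the ring only needs 4 kB alignment; an 8192-byte ring
 * would yield 13.
 */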
/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page
 * boundary, it is freed and the allocation is retried at a location aligned
 * to the descriptor ring size, which guarantees the ring cannot cross a
 * page boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}
static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
				   " failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
				   " failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}
static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   void __iomem *dmaregstx, void __iomem *dmaregsrx,
			   uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level)
{
	struct dma_info *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointer */
	di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
	di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
		   di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero
	 *     based memory, need offset
	 *     Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram
	 *     swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* add offset for pcie with DMA64 bus */
	di->ddoffsethigh = SI_PCIE_DMA_H32;
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
		di->addrext = false;
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for a smaller dd table, HW relaxes the alignment
			 * requirement
			 */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
				   "supported\n", di->name, (u32)di->txdpa));
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
				   "supported\n", di->name, (u32)di->rxdpa));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));

	return (struct dma_pub *) di;

 fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}
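
/*
 * Editor's sketch (hypothetical caller, not in the original file): the
 * expected bring-up order for this API, roughly as the driver core would
 * use it:
 *
 *	struct dma_pub *dp;
 *
 *	dp = dma_attach("wl0", sih, dmaregstx, dmaregsrx, ntxd, nrxd,
 *			rxbufsize, -1, nrxpost, rxoffset, &msg_level);
 *	if (dp) {
 *		dma_txinit(dp);
 *		dma_rxinit(dp);
 *		dma_rxfill(dp);		// post the initial rx buffers
 *	}
 *
 * Passing rxextheadroom = -1 selects the BCMEXTRAHDROOM default, as handled
 * above.
 */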
/* Init the tx or rx descriptor */
static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			     cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}
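
/*
 * Editor's note (illustrative, not in the original source): on a bus with
 * dataoffsetlow != 0, a physical address such as pa = 0xC0001000 has bits
 * [31:30] set, so it takes the address-extension path above:
 * ae = (0xC0001000 & 0xC0000000) >> 30 == 3, addrlow receives 0x00001000
 * (plus dataoffsetlow), and ae is stored in the D64_CTRL2_AE field of ctrl2.
 */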
/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* free dma descriptor rings */
	if (di->txd64)
		pci_free_consistent(di->pbus, di->txdalloc,
				    ((s8 *)di->txd64 - di->txdalign),
				    (di->txdpaorig));
	if (di->rxd64)
		pci_free_consistent(di->pbus, di->rxdalloc,
				    ((s8 *)di->rxd64 - di->rxdalign),
				    (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
		} else {
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
		}
	} else {
		/* DMA64 32bits address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
			SET_REG(&di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}
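
/*
 * Editor's note (illustrative, not in the original source): when the ring is
 * not 4K/8K aligned (aligndesc_4k == false), the xmtptrbase/rcvptrbase
 * values saved above let the chip's ptr register be programmed as a 13-bit
 * byte offset from addrlow rather than a plain index; see the
 * "di->xmtptrbase + I2B(txout, struct dma64desc)" style writes in
 * dma_rxfill() and dma_txfast() below.
 */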
static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	control =
	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* DMA engine without alignment requirement requires table to be
	 * initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}
static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}
/*
 * !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 *   if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 *   supported with a pkt chain;
 *   otherwise, the frame is treated as a giant pkt and will be tossed.
 *   The DMA scattering starts with a normal DMA header, followed by the
 *   first buffer's data. After it reaches the max buffer size, the data
 *   continues in the next DMA descriptor buffer WITHOUT a DMA header.
 */
struct sk_buff *dma_rx(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = le16_to_cpu(*(__le16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
	dma_spin_for_len(len, head);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((R_REG(&di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				struct dma64desc);
			DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			brcmu_pkt_buf_free_skb(head);
			di->dma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
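
/*
 * Editor's note (illustrative, not in the original source): with
 * rxbufsize = 2048, rxoffset = 30 and a received frame of len = 5000 bytes,
 * the head buffer carries 2048 - 30 = 2018 bytes of frame, so
 * resid = 5000 - 2018 = 2982 and two more buffers are chained (the second
 * trimmed to 2048, the third to 934). With DMA_CTRL_RXMULTI clear such a
 * frame is instead counted in rxgiants and dropped.
 */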
static bool dma64_rxidle(struct dma_info *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
/*
 * post receive buffers
 *  return false if refill failed completely and the ring is empty; this will
 *  stall the rx dma, and the user might want to call rxfill again asap. This
 *  is unlikely on a memory-rich NIC, but common on a memory-constrained
 *  dongle.
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
					   di->name));
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}
*pub
)
1072 struct dma_info
*di
= (struct dma_info
*)pub
;
1075 DMA_TRACE(("%s: dma_rxreclaim\n", di
->name
));
1077 while ((p
= _dma_getnextrxp(di
, true)))
1078 brcmu_pkt_buf_free_skb(p
);
void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}
/* get the address of the var in order to change later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = (struct dma_info *)pub;

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}
/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* DMA engine without alignment requirement requires table to be
	 * initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}
void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}
bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	return (di->ntxd == 0) ||
	    ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == DMA_RANGE_ALL) ? "all" :
		   ((range ==
		     DMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}
bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}
bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
/*
 * !! tx entry routine
 * WARNING: caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent
 *   hard-to-debug problems
 */
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = p->data;
		len = p->len;
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (nexttxd(di, txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

		flags = 0;
		if (p == p0)
			flags |= D64_CTRL1_SOF;

		/* With a DMA segment list, Descriptor table is filled
		 * using the segment list instead of looping over
		 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
		 * is when end of segment list is reached.
		 */
		if (next == NULL)
			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
		if (txout == (di->ntxd - 1))
			flags |= D64_CTRL1_EOT;

		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

		txout = nexttxd(di, txout);
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		di->txd64[prevtxd(di, txout)].ctrl1 =
		     cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, struct dma64desc));

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	brcmu_pkt_buf_free_skb(p0);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -1;
}
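
/*
 * Editor's note (illustrative, not in the original source): the flow-control
 * arithmetic above keeps one descriptor unused so a full ring can be told
 * apart from an empty one. With ntxd = 256, txin = 10 and txout = 20, ten
 * descriptors are active and txavail = 256 - 10 - 1 = 245.
 */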
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == DMA_RANGE_ALL) ? "all" :
		   ((range ==
		     DMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		struct dma64regs __iomem *dregs = di->d64txregs;

		end = (u16) (B2I(((R_REG(&dregs->status0) &
				   D64_XS0_CD_MASK) -
				  di->xmtptrbase) & D64_XS0_CD_MASK,
				 struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(&dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size =
		    (le32_to_cpu(di->txd64[i].ctrl2) &
		     D64_CTRL2_BC_MASK);

		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
		  "force %d\n", start, end, di->txout, forceall));
	return NULL;
}
/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = (struct dma_info *) dmah;
	uint i =   di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = (struct sk_buff *)di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}