/* drivers/staging/brcm80211/util/hnddma.c */
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <bcmendian.h>
#ifdef BCMDBG
#define	DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printf args; \
	} while (0)
#define	DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printf args; \
	} while (0)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* BCMDBG */

#define	DMA_NONE(args)
#define	d32txregs	dregs.d32_u.txregs_32
#define	d32rxregs	dregs.d32_u.rxregs_32
#define	txd32		dregs.d32_u.txd_32
#define	rxd32		dregs.d32_u.rxd_32

#define	d64txregs	dregs.d64_u.txregs_64
#define	d64rxregs	dregs.d64_u.rxregs_64
#define	txd64		dregs.d64_u.txd_64
#define	rxd64		dregs.d64_u.rxd_64

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma32regs_t *txregs_32;	/* 32-bit dma tx engine registers */
			dma32regs_t *rxregs_32;	/* 32-bit dma rx engine registers */
			dma32dd_t *txd_32;	/* pointer to dma32 tx descriptor ring */
			dma32dd_t *rxd_32;	/* pointer to dma32 rx descriptor ring */
		} d32_u;
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/*   high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/*   high 32 bits */
	bool aligndesc_4k;	/* descriptor base need to be aligned or not */
} dma_info_t;
/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define	DMA32_ENAB(di)	1
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	((di)->dma64)
#else				/* !BCMDMA32 */
#define	DMA32_ENAB(di)	0
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	1
#endif				/* !BCMDMA32 */
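
/*
 * Note (illustrative, not part of the original source): with BCMDMA32
 * defined, both engine flavours are compiled in and DMA64_MODE() tests
 * di->dma64 at run time; without it, DMA32_ENAB(di) is the constant 0,
 * so the compiler can discard the 32-bit code paths below entirely.
 */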
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))
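
/*
 * Worked example (illustrative): with di->ntxd == 64,
 *   TXD(63 + 1)       == (64 & 63) == 0       -- NEXTTXD wraps to ring start
 *   NTXDACTIVE(60, 2) == ((2 - 60) & 63) == 6 -- count stays correct across wrap
 */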
/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
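
/*
 * Example (illustrative, assuming the 16-byte dma64dd_t used here):
 * descriptor 3 of a dma64dd_t ring sits at byte offset
 * I2B(3, dma64dd_t) == 3 * 16 == 48, and B2I(48, dma64dd_t) == 3
 * recovers the index from a chip byte offset.
 */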
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
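
/*
 * Example (illustrative): a 32-bit descriptor/data address such as
 * 0xd0001000 is split for the address-extension (AE) scheme:
 *   ae  = (0xd0001000 & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT == 0x3
 *   low =  0xd0001000 & ~PCI32ADDR_HIGH                         == 0x10001000
 * The two high bits travel in the engine's AE control field instead of in
 * the address word (see dma32_dd_upd()/dma64_dd_upd() below).
 */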
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
			      u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah);
/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);

static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs);

/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
static const di_fcn_t dma32proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma32_txinit,
	(di_txreset_t) dma32_txreset,
	(di_txenabled_t) dma32_txenabled,
	(di_txsuspend_t) dma32_txsuspend,
	(di_txresume_t) dma32_txresume,
	(di_txsuspended_t) dma32_txsuspended,
	(di_txsuspendedidle_t) dma32_txsuspendedidle,
	(di_txfast_t) dma32_txfast,
	(di_txunframed_t) NULL,
	(di_getpos_t) NULL,
	(di_txstopped_t) dma32_txstopped,
	(di_txreclaim_t) dma32_txreclaim,
	(di_getnexttxp_t) dma32_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma32_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma32_rxreset,
	(di_rxidle_t) dma32_rxidle,
	(di_rxstopped_t) dma32_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma32_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
hnddma_t *dma_attach(struct osl_info *osh, char *name, si_t *sih,
		     void *dmaregstx, void *dmaregsrx, uint ntxd,
		     uint nrxd, uint rxbufsize, int rxextheadroom,
		     uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printf("dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;
	/* old chips w/o sb are no longer supported */
	ASSERT(sih != NULL);

	if (DMA64_ENAB(di))
		di->dma64 =
		    ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
	else
		di->dma64 = 0;
	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);
	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *) dmaregstx;
		di->d64rxregs = (dma64regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else if (DMA32_ENAB(di)) {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *) dmaregstx;
		di->d32rxregs = (dma32regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
	} else {
		DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
		ASSERT(0);
		goto fail;
	}
	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);
	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name,
		   (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh,
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sih = sih;

	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;
	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SI_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);
	/* do the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
			di->dmadesc_align = D64RINGALIGN_BITS;
			if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for smaller dd table, HW relax the alignment requirement */
				di->dmadesc_align = D64RINGALIGN_BITS - 1;
			}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n",
				   di->name));
			goto fail;
		}
	}
	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n",
				   di->name));
			goto fail;
		}
	}
	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n",
				   di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n",
				   di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}
	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));
	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}
	return (hnddma_t *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
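
/*
 * Usage sketch (illustrative only; the counts and tunables below are
 * hypothetical example values, not ones mandated by this driver):
 *
 *	hnddma_t *dmah = dma_attach(osh, "wl0", sih,
 *				    dmaregstx, dmaregsrx,
 *				    64, 64,	// ntxd, nrxd: powers of 2
 *				    2048,	// rxbufsize
 *				    -1,		// rxextheadroom: take BCMEXTRAHDROOM
 *				    32, 30,	// nrxpost, rxoffset
 *				    &msglevel);
 *	if (dmah == NULL)
 *		... attach failed, nothing to free ...
 */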
/* init the tx or rx descriptor */
static inline void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	/* dma32 uses 32-bit control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		u32 ae;
		ASSERT(di->addrext);
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
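
/*
 * Example (illustrative): parity32(0x3) == 0 (two bits set),
 * parity32(0x7) == 1 (three bits set). When DMA_CTRL_PEN is enabled,
 * dma64_dd_upd() below sets D64_CTRL2_PARITY whenever the descriptor
 * words xor to odd parity, so the completed descriptor is always even.
 */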
static inline void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs)
{
	u32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return (w & XC_AE) == XC_AE;
}
static bool _dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_alloc(di, direction);
	} else if (DMA32_ENAB(di)) {
		return dma32_alloc(di, direction);
	} else {
		ASSERT(0);
		return false;
	}
}
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (di->txd64)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->txd64 -
					     di->txdalign), di->txdalloc,
					    (di->txdpaorig), &di->tx_dmah);
		if (di->rxd64)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->rxd64 -
					     di->rxdalign), di->rxdalloc,
					    (di->rxdpaorig), &di->rx_dmah);
	} else if (DMA32_ENAB(di)) {
		if (di->txd32)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->txd32 -
					     di->txdalign), di->txdalloc,
					    (di->txdpaorig), &di->tx_dmah);
		if (di->rxd32)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->rxd32 -
					     di->rxdalign), di->rxdalloc,
					    (di->rxdpaorig), &di->rx_dmah);
	}

	/* free packet pointer vectors */
	if (di->txp)
		kfree((void *)di->txp);
	if (di->rxp)
		kfree((void *)di->rxp);

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		kfree(di->txp_dmah);

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		kfree(di->rxp_dmah);

	/* free our private info structure */
	kfree((void *)di);
}
static bool _dma_descriptor_align(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		u32 addrl;

		/* Check to see if the descriptors need to be aligned on 4K/8K or not */
		if (di->d64txregs != NULL) {
			W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64txregs->addrlow);
			if (addrl != 0)
				return false;
		} else if (di->d64rxregs != NULL) {
			W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
			if (addrl != 0)
				return false;
		}
	}
	return true;
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

		/* not all tx or rx channel are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
					   di->name));
				ASSERT(0);
			}
			return true;
		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
					   di->name));
				ASSERT(0);
			}
			return true;
		}
		return false;
	} else if (DMA32_ENAB(di)) {
		if (di->d32txregs)
			return _dma32_addrext(di->osh, di->d32txregs);
		else if (di->d32rxregs)
			return _dma32_addrext(di->osh, di->d32rxregs);
	}
	return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (!di->aligndesc_4k) {
			if (direction == DMA_TX)
				di->xmtptrbase = PHYSADDRLO(pa);
			else
				di->rcvptrbase = PHYSADDRLO(pa);
		}

		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			}
		} else {
			/* DMA64 32bits address extension */
			u32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control,
					D64_XC_AE, (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control,
					D64_RC_AE, (ae << D64_RC_AE_SHIFT));
			}
		}
	} else if (DMA32_ENAB(di)) {
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			u32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE,
					ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
					ae << RC_AE_SHIFT);
			}
		}
	}
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else if (DMA32_ENAB(di))
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
	else
		ASSERT(0);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		BZERO_SM((void *)di->rxd64,
			 (di->nrxd * sizeof(dma64dd_t)));

		/* DMA engine without alignment requirement requires table to be inited
		 * before enabling the engine
		 */
		if (!di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);

		_dma_rxenable(di);

		if (di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else if (DMA32_ENAB(di)) {
		BZERO_SM((void *)di->rxd32,
			 (di->nrxd * sizeof(dma32dd_t)));
		_dma_rxenable(di);
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}
static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		u32 control =
		    (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
		    D64_RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else if (DMA32_ENAB(di)) {
		u32 control =
		    (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= RC_OC;

		W_REG(di->osh, &di->d32rxregs->control,
		      ((di->rxoffset << RC_RO_SHIFT) | control));
	} else
		ASSERT(0);
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (u16) di->rxoffset;
	*rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported
 * otherwise, it's treated as giant pkt and will be tossed.
 * The DMA scattering starts with normal DMA header, followed by first buffer data.
 * After it reaches the max size of buffer, the data continues in next DMA descriptor
 * buffer WITHOUT DMA header
 */
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = ltoh16(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
	if (!len) {
		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
			udelay(1);

		*(u16 *) (head->data) = htol16((u16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t) : B2I(R_REG(di->osh,
						       &di->d32rxregs->
						       status) & RS_CD_MASK,
						 dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			PKTFREE(di->osh, head, false);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
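
/*
 * Worked example (illustrative): with rxbufsize 2048 and rxoffset 30, a
 * received frame of len 3000 gives
 *   pkt_len = min(30 + 3000, 2048) = 2048 bytes kept in the head buffer,
 *   resid   = 3000 - (2048 - 30)   = 982 bytes continued (without a DMA
 * header) in the next posted buffer, chained only when DMA_CTRL_RXMULTI
 * is set; otherwise the frame is tossed as a giant.
 */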
/* post receive buffers
 *  return true if refill failed completely and ring is empty
 *  this will stall the rx dma and user might want to call rxfill again asap
 *  This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		   size to be allocated
		 */

		p = osl_pktget(di->osh, di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n",
							   di->name));
						ring_empty = true;
					}
				} else if (DMA32_ENAB(di)) {
					if (dma32_rxidle(di)) {
						DMA_ERROR(("%s: rxfill32: ring is empty !\n",
							   di->name));
						ring_empty = true;
					}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, p->data,
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
				     di->rxbufsize);
		} else if (DMA32_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
				     di->rxbufsize);
		} else
			ASSERT(0);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr,
		      di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else if (DMA32_ENAB(di)) {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	} else
		ASSERT(0);

	return ring_empty;
}
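
/*
 * Example (illustrative): with nrxpost 32 and NRXDACTIVE(rxin, rxout) == 28
 * buffers still posted, the loop above allocates and posts
 * n == 32 - 28 == 4 fresh buffers before bumping the chip's ptr register.
 */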
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end =
		    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
			  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end =
		    B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}
/* like getnextrxp but not take off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end =
		    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
			  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end =
		    B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return di->rxp[i];

	return NULL;
}
static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */
	di = di;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		PKTFREE(di->osh, p, false);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_getnextrxp(di, forceall);
	} else if (DMA32_ENAB(di)) {
		return dma32_getnextrxp(di, forceall);
	} else
		ASSERT(0);
	return NULL;
}
static void _dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}
static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		curr =
		    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
			  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		curr =
		    B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(curr, di->txout);
}
static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(di->txin, ptr);
}
static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	/* check the handle before dereferencing it */
	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return 0;
	}

	dmactrlflags = di->hnddma.dmactrlflags;

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			control = R_REG(di->osh, &di->d64txregs->control);
			W_REG(di->osh, &di->d64txregs->control,
			      control | D64_XC_PD);
			if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
				/* We *can* disable it so it is supported,
				 * restore control register
				 */
				W_REG(di->osh, &di->d64txregs->control,
				      control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else if (DMA32_ENAB(di)) {
			control = R_REG(di->osh, &di->d32txregs->control);
			W_REG(di->osh, &di->d32txregs->control,
			      control | XC_PD);
			if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
				W_REG(di->osh, &di->d32txregs->control,
				      control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else
			ASSERT(0);
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->hnddma.txavail);
	else {
		ASSERT(0);
	}
	return 0;
}
void dma_txpioloopback(struct osl_info *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}
static u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	ASSERT(size);
	ASSERT(!(size & (size - 1)));
	while (size >>= 1) {
		bitpos++;
	}
	return bitpos;
}
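
/*
 * Example (illustrative): dma_align_sizetobits(4096) == 12 and
 * dma_align_sizetobits(1024) == 10; the argument must already be a
 * power of 2, as enforced by the ASSERT above.
 */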
/* This function ensures that the DMA descriptor ring will not get allocated
 * across Page boundary. If the allocation is done across the page boundary
 * at the first time, then it is freed and the allocation is done at
 * descriptor ring size aligned location. This will ensure that the ring will
 * not cross page boundary
 */
static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
				  dmah);
	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
		va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
					  descpa, dmah);
	}
	return va;
}
1425 static void dma32_txinit(dma_info_t
*di
)
1427 u32 control
= XC_XE
;
1429 DMA_TRACE(("%s: dma_txinit\n", di
->name
));
1434 di
->txin
= di
->txout
= 0;
1435 di
->hnddma
.txavail
= di
->ntxd
- 1;
1437 /* clear tx descriptor ring */
1438 BZERO_SM((void *)di
->txd32
, (di
->ntxd
* sizeof(dma32dd_t
)));
1440 if ((di
->hnddma
.dmactrlflags
& DMA_CTRL_PEN
) == 0)
1442 W_REG(di
->osh
, &di
->d32txregs
->control
, control
);
1443 _dma_ddtable_init(di
, DMA_TX
, di
->txdpa
);
static bool dma32_txenabled(dma_info_t *di)
{
	u32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return (xc != 0xffffffff) && (xc & XC_XE);
}

static void dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0)
	    || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma32_getnexttxp(di, range)))
		PKTFREE(di->osh, p, true);
}
static bool dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
		XS_XS_STOPPED);
}

static bool dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
		RS_RS_STOPPED);
}
static bool dma32_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;
	uint alloced;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(dma32dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	alloced = 0;
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig, &di->tx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				   di->name));
			return false;
		}

		PHYSADDRHISET(di->txdpa, 0);
		ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
		di->txd32 = (dma32dd_t *) roundup((unsigned long)va, align);
		di->txdalign =
		    (uint) ((s8 *)di->txd32 - (s8 *) va);

		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		di->txdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->txd32, align));
	} else {
		va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig, &di->rx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				   di->name));
			return false;
		}

		PHYSADDRHISET(di->rxdpa, 0);
		ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
		di->rxd32 = (dma32dd_t *) roundup((unsigned long)va, align);
		di->rxdalign =
		    (uint) ((s8 *)di->rxd32 - (s8 *) va);

		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->rxd32, align));
	}

	return true;
}
static bool dma32_txreset(dma_info_t *di)
{
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d32txregs->control, XC_SE);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
		  != XS_XS_DISABLED) && (status != XS_XS_IDLE)
		 && (status != XS_XS_STOPPED), (10000));

	W_REG(di->osh, &di->d32txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
				   &di->d32txregs->status) & XS_XS_MASK)) !=
		  XS_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == XS_XS_DISABLED;
}
static bool dma32_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
		R_REG(di->osh, &di->d32rxregs->ptr));
}
static bool dma32_rxreset(dma_info_t *di)
{
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(di->osh, &di->d32rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
				   &di->d32rxregs->status) & RS_RS_MASK)) !=
		  RS_RS_DISABLED), 10000);

	return status == RS_RS_DISABLED;
}
static bool dma32_rxenabled(dma_info_t *di)
{
	u32 rc;

	rc = R_REG(di->osh, &di->d32rxregs->control);
	return (rc != 0xffffffff) && (rc & RC_RE);
}
static bool dma32_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return true;

	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
		return 0;

	udelay(2);
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
		XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: call must check the return value for error.
 *   the error(toss frames) could be fatal and cause many subsequent hard to debug problems
 */
static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit)
{
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = p->data;
		len = p->len;
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		/* get physical address of buffer start */
		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
			     &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= CTRL_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
			 * end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				flags |= CTRL_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			ASSERT(PHYSADDRHI(pa) == 0);

			dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & CTRL_EOF))
		W_SM(&di->txd32[PREVTXD(txout)].ctrl,
		     BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, true);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
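
/*
 * Caller sketch (illustrative, hypothetical error path): per the WARNING
 * above, the return value must be checked -- on failure the frame has
 * already been freed and tx flow control updated:
 *
 *	if (dma32_txfast(di, skb, true))	// nonzero means out of txds
 *		...account the drop; do not free skb again...
 */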
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have be
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have be
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma32regs_t *dregs = di->d32txregs;

		end =
		    (u16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
			      dma32dd_t);

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) ((R_REG(di->osh, &dregs->status) &
				    XS_AD_MASK) >> XS_AD_SHIFT);
			active_desc = (u16) B2I(active_desc, dma32dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa, 0);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
			     CTRL_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd32[i].addr, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
		  start, end, di->txout, forceall));
	return NULL;
}
static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma32_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa, 0);

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd32[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma32_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	ASSERT(dma32_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (u16) (B2I
		    (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
		      >> XS_AD_SHIFT), dma32dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the segment map as well */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
			      sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
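
/*
 * Worked example (illustrative): with ntxd 64, txin 5 and ActiveDescr
 * ad == 9, rot == TXD(9 - 5) == 4; every posted descriptor old moves to
 * new == TXD(old + 4), txin becomes 9 and txout advances by 4, so the
 * engine can be resumed from its ActiveDescr after a suspend.
 */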
/* 64-bit DMA functions */

static void dma64_txinit(dma_info_t *di)
{
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(di->osh, &di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be inited
	 * before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
	u32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d64txregs->control);
	return (xc != 0xffffffff) && (xc & D64_XC_XE);
}
static void dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	    ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}
static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
			PKTFREE(di->osh, p, true);
	}
}
static bool dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
		D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
		D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig, &di->tx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				   di->name));
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
	} else {
		va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig, &di->rx_dmah);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				   di->name));
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
	}

	return true;
}
static bool dma64_txreset(dma_info_t *di)
{
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(di->osh, &di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}
static bool dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
static bool dma64_rxreset(dma_info_t *di)
{
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(di->osh, &di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}
static bool dma64_rxenabled(dma_info_t *di)
{
	u32 rc;

	rc = R_REG(di->osh, &di->d64rxregs->control);
	return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return true;

	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report
 * from the DMA. We return a pointer to the beginning of the DATA buffer of
 * the current descriptor. If the DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	u32 cd_offset;

	if (direction == DMA_TX) {
		cd_offset =
		    R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[B2I(cd_offset, dma64dd_t)];
	} else {
		cd_offset =
		    R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[B2I(cd_offset, dma64dd_t)];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
		va = NULL;
	}

	return va;
}
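
/*
 * Illustrative sketch, not part of the original driver: a caller streaming
 * unframed data might poll dma64_getpos() like this to learn which buffer
 * the engine is currently working on.
 */
#if 0
static void example_check_tx_progress(dma_info_t *di)
{
	void *cur = dma64_getpos(di, DMA_TX);

	if (cur == NULL) {
		/* no descriptors active - the engine is idle */
		return;
	}
	/* cur is the start of the data buffer of the current descriptor;
	 * everything posted before it has been consumed by the hardware.
	 */
}
#endif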
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len"
 * bytes of data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;		/* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
	ASSERT(di->txp[txout] == NULL);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	txout = NEXTTXD(txout);
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit) {
		W_REG(di->osh, &di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));
	}

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
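
/*
 * Illustrative sketch, not part of the original driver: posting two unframed
 * buffers back to back, deferring the doorbell write (commit == false) until
 * the last one so the chip is kicked only once.
 */
#if 0
static int example_post_two_buffers(dma_info_t *di,
				    void *buf0, uint len0,
				    void *buf1, uint len1)
{
	if (dma64_txunframed(di, buf0, len0, false))
		return -1;	/* out of descriptors */
	return dma64_txunframed(di, buf1, len1, true);
}
#endif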
/* !! tx entry routine
 * WARNING: the caller must check the return value for error.
 * The error (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems.
 */
static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
				    bool commit)
{
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = p->data;
		len = p->len;
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif				/* BCM_DMAPAD */
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
			     &di->txp_dmah[txout]);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
			 * is when the end of the segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	PKTFREE(di->osh, p0, true);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
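
/*
 * Illustrative sketch, not part of the original driver: per the WARNING
 * above, dma64_txfast() frees the packet on failure, so a caller must check
 * the return value and only account the frame on success.
 */
#if 0
static void example_send_frame(dma_info_t *di, struct sk_buff *skb)
{
	if (dma64_txfast(di, skb, true) != 0) {
		/* skb was already freed via PKTFREE; do not touch it again */
		return;
	}
	/* success: the ring owns the frame until dma64_getnexttxp() */
}
#endif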
/*
 * Reclaim the next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transferred")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end =
		    (u16) (B2I
			   (((R_REG(di->osh, &dregs->status0) &
			      D64_XS0_CD_MASK) -
			     di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(di->osh, &dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	/* original passed the nonexistent "forceall" here; report the
	 * equivalent range test instead
	 */
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
		  start, end, di->txout, (range == HNDDMA_RANGE_ALL)));
	return NULL;
}
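
/*
 * Illustrative sketch, not part of the original driver: a typical
 * tx-complete handler drains the ring by calling dma64_getnexttxp() until it
 * returns NULL, freeing each reclaimed packet.
 */
#if 0
static void example_tx_reclaim(dma_info_t *di)
{
	void *p;

	while ((p = dma64_getnexttxp(di, HNDDMA_RANGE_TRANSMITTED)) != NULL)
		PKTFREE(di->osh, p, true);
}
#endif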
static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs)
{
	u32 w;

	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (u16) (B2I
		    ((((R_REG(di->osh, &di->d64txregs->status1) &
			D64_XS1_AD_MASK)
		       - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the map */
		if (DMASGLIST_ENAB) {
			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
			      sizeof(hnddma_seg_map_t));
			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
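
/*
 * Illustrative sketch, not part of the original driver: the rotation amount
 * computed in dma64_txrotate() is simply the ring distance from the old txin
 * to the hardware ActiveDescriptor, wrapped modulo the ring size. The values
 * below are hypothetical, chosen only to make the wrap-around visible.
 */
#if 0
static void example_rotation_math(void)
{
	unsigned int ntxd = 64;		/* hypothetical ring size (power of 2) */
	unsigned int txin = 60, ad = 3;	/* hypothetical old head / ActiveDescr */

	/* same modulo-ring math as TXD(): (3 - 60) & 63 == 7 */
	unsigned int rot = (ad - txin) & (ntxd - 1);

	/* every live entry slides forward seven slots */
	(void)rot;
}
#endif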
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
	dma32regs_t *dma32regs;
	struct osl_info *osh;

	osh = si_osh(sih);

	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((sih->bustype == SI_BUS) ||
			    ((sih->bustype == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;

		/* DMA64 is always 32-bit capable, AE is always true */
		ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));

		return DMADDRWIDTH_32;
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *) dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((sih->bustype == SI_BUS) ||
	    ((sih->bustype == PCI_BUS)
	     && sih->buscoretype == PCIE_CORE_ID)
	    || (_dma32_addrext(osh, dma32regs)))
		return DMADDRWIDTH_32;

	/* otherwise restricted to 30-bit addressing */
	return DMADDRWIDTH_30;
}
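
/*
 * Illustrative sketch, not part of the original driver: a probe path might
 * use dma_addrwidth() to pick a DMA mask before attaching. pci_set_dma_mask()
 * and DMA_BIT_MASK() are the standard kernel APIs of this era; their use
 * here is an assumption for illustration, and the 30-bit case is ignored.
 */
#if 0
static int example_pick_dma_mask(si_t *sih, void *dmaregs,
				 struct pci_dev *pdev)
{
	if (dma_addrwidth(sih, dmaregs) == DMADDRWIDTH_64)
		return pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}
#endif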