drivers/staging/brcm80211/util/hnddma.c
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/netdevice.h>
20 #include <linux/pci.h>
21 #include <bcmdefs.h>
22 #include <bcmdevs.h>
23 #include <osl.h>
24 #include <bcmendian.h>
25 #include <hndsoc.h>
26 #include <bcmutils.h>
27 #include <siutils.h>
28
29 #include <sbhnddma.h>
30 #include <hnddma.h>
31
32 /* debug/trace */
33 #ifdef BCMDBG
34 #define DMA_ERROR(args) \
35 do { \
36 if (!(*di->msg_level & 1)) \
37 ; \
38 else \
39 printf args; \
40 } while (0)
41 #define DMA_TRACE(args) \
42 do { \
43 if (!(*di->msg_level & 2)) \
44 ; \
45 else \
46 printf args; \
47 } while (0)
48 #else
49 #define DMA_ERROR(args)
50 #define DMA_TRACE(args)
51 #endif /* BCMDBG */
52
53 #define DMA_NONE(args)
54
55 #define d32txregs dregs.d32_u.txregs_32
56 #define d32rxregs dregs.d32_u.rxregs_32
57 #define txd32 dregs.d32_u.txd_32
58 #define rxd32 dregs.d32_u.rxd_32
59
60 #define d64txregs dregs.d64_u.txregs_64
61 #define d64rxregs dregs.d64_u.rxregs_64
62 #define txd64 dregs.d64_u.txd_64
63 #define rxd64 dregs.d64_u.rxd_64
64
65 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
66 static uint dma_msg_level;
67
68 #define MAXNAMEL 8 /* 8 char names */
69
70 #define DI_INFO(dmah) ((dma_info_t *)dmah)
71
72 /* dma engine software state */
73 typedef struct dma_info {
74 struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
75 * which could be const
76 */
77 uint *msg_level; /* message level pointer */
78 char name[MAXNAMEL]; /* caller's name for diag msgs */
79
80 void *osh; /* os handle */
81 si_t *sih; /* sb handle */
82
83 bool dma64; /* this dma engine is operating in 64-bit mode */
84 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
85
86 union {
87 struct {
88 dma32regs_t *txregs_32; /* 32-bit dma tx engine registers */
89 dma32regs_t *rxregs_32; /* 32-bit dma rx engine registers */
90 dma32dd_t *txd_32; /* pointer to dma32 tx descriptor ring */
91 dma32dd_t *rxd_32; /* pointer to dma32 rx descriptor ring */
92 } d32_u;
93 struct {
94 dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
95 dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
96 dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
97 dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
98 } d64_u;
99 } dregs;
100
101 u16 dmadesc_align; /* alignment requirement for dma descriptors */
102
103 u16 ntxd; /* # tx descriptors tunable */
104 u16 txin; /* index of next descriptor to reclaim */
105 u16 txout; /* index of next descriptor to post */
106 void **txp; /* pointer to parallel array of pointers to packets */
107 osldma_t *tx_dmah; /* DMA TX descriptor ring handle */
108 hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
109 dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
110 dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
111 u16 txdalign; /* #bytes added to alloc'd mem to align txd */
112 u32 txdalloc; /* #bytes allocated for the ring */
113 u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
114 * is not just an index, it needs all 13 bits to be
115 * an offset from the addr register.
116 */
117
118 u16 nrxd; /* # rx descriptors tunable */
119 u16 rxin; /* index of next descriptor to reclaim */
120 u16 rxout; /* index of next descriptor to post */
121 void **rxp; /* pointer to parallel array of pointers to packets */
122 osldma_t *rx_dmah; /* DMA RX descriptor ring handle */
123 hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
124 dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
125 dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
126 u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
127 u32 rxdalloc; /* #bytes allocated for the ring */
128 u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
129
130 /* tunables */
131 unsigned int rxbufsize; /* rx buffer size in bytes,
132 * not including the extra headroom
133 */
134 uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper stack
135 * e.g. some rx pkt buffers will be bridged to tx side
136 * without byte copying. The extra headroom needs to be
137 * large enough to fit txheader needs.
138 * Some dongle drivers may not need it.
139 */
140 uint nrxpost; /* # rx buffers to keep posted */
141 unsigned int rxoffset; /* rxcontrol offset */
142 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
143 uint ddoffsethigh; /* high 32 bits */
144 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
145 uint dataoffsethigh; /* high 32 bits */
146 bool aligndesc_4k; /* descriptor base needs to be aligned or not */
147 } dma_info_t;
148
149 /*
150 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
151 * Otherwise it will support only 64-bit.
152 *
153 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
154 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
155 *
156 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
157 */
158 #ifdef BCMDMA32
159 #define DMA32_ENAB(di) 1
160 #define DMA64_ENAB(di) 1
161 #define DMA64_MODE(di) ((di)->dma64)
162 #else /* !BCMDMA32 */
163 #define DMA32_ENAB(di) 0
164 #define DMA64_ENAB(di) 1
165 #define DMA64_MODE(di) 1
166 #endif /* !BCMDMA32 */
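/*
 * A minimal sketch of how these macros pick a code path (illustrative
 * only): in the default !BCMDMA32 build, a dispatch like the one in
 * _dma_alloc() below,
 *
 *	if (DMA64_ENAB(di) && DMA64_MODE(di))
 *		return dma64_alloc(di, direction);
 *	else if (DMA32_ENAB(di))
 *		return dma32_alloc(di, direction);
 *
 * collapses at compile time to the dma64 branch, since DMA32_ENAB(di)
 * is the constant 0 and the 32-bit branch becomes dead code.
 */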
167
168 /* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
169 #ifdef BCMDMASGLISTOSL
170 #define DMASGLIST_ENAB true
171 #else
172 #define DMASGLIST_ENAB false
173 #endif /* BCMDMASGLISTOSL */
174
175 /* descriptor bumping macros */
176 #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
177 #define TXD(x) XXD((x), di->ntxd)
178 #define RXD(x) XXD((x), di->nrxd)
179 #define NEXTTXD(i) TXD((i) + 1)
180 #define PREVTXD(i) TXD((i) - 1)
181 #define NEXTRXD(i) RXD((i) + 1)
182 #define PREVRXD(i) RXD((i) - 1)
183
184 #define NTXDACTIVE(h, t) TXD((t) - (h))
185 #define NRXDACTIVE(h, t) RXD((t) - (h))
186
187 /* macros to convert between byte offsets and indexes */
188 #define B2I(bytes, type) ((bytes) / sizeof(type))
189 #define I2B(index, type) ((index) * sizeof(type))
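/*
 * Worked example of the ring arithmetic above (illustrative values only,
 * assuming di->ntxd == 8 and a 16-byte dma64dd_t):
 *	TXD(9)            == 9 & 7 == 1      - same as 9 % 8, but cheaper
 *	NEXTTXD(7)        == TXD(8) == 0     - wraps around the ring
 *	NTXDACTIVE(6, 1)  == TXD(1 - 6) == 3 - descriptors 6, 7 and 0 in use
 *	I2B(3, dma64dd_t) == 3 * 16 == 48    - byte offset of descriptor 3
 */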
190
191 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
192 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
193
194 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
195 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
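/*
 * Example of the address-extension split these masks implement (values
 * illustrative only): for a 32-bit physical address pa == 0xC0123456,
 *	ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;	-> ae == 0x3
 *	pa &= ~PCI32ADDR_HIGH;					-> pa == 0x00123456
 * The two stripped bits are carried in the descriptor/register AE field
 * instead, as done in dma32_dd_upd() and dma64_dd_upd() below.
 */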
196
197 /* Common prototypes */
198 static bool _dma_isaddrext(dma_info_t *di);
199 static bool _dma_descriptor_align(dma_info_t *di);
200 static bool _dma_alloc(dma_info_t *di, uint direction);
201 static void _dma_detach(dma_info_t *di);
202 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
203 static void _dma_rxinit(dma_info_t *di);
204 static void *_dma_rx(dma_info_t *di);
205 static bool _dma_rxfill(dma_info_t *di);
206 static void _dma_rxreclaim(dma_info_t *di);
207 static void _dma_rxenable(dma_info_t *di);
208 static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
209 static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
210 u16 *rxbufsize);
211
212 static void _dma_txblock(dma_info_t *di);
213 static void _dma_txunblock(dma_info_t *di);
214 static uint _dma_txactive(dma_info_t *di);
215 static uint _dma_rxactive(dma_info_t *di);
216 static uint _dma_txpending(dma_info_t *di);
217 static uint _dma_txcommitted(dma_info_t *di);
218
219 static void *_dma_peeknexttxp(dma_info_t *di);
220 static void *_dma_peeknextrxp(dma_info_t *di);
221 static unsigned long _dma_getvar(dma_info_t *di, const char *name);
222 static void _dma_counterreset(dma_info_t *di);
223 static void _dma_fifoloopbackenable(dma_info_t *di);
224 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
225 static u8 dma_align_sizetobits(uint size);
226 static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
227 u16 *alignbits, uint *alloced,
228 dmaaddr_t *descpa, osldma_t **dmah);
229
230 /* Prototypes for 32-bit routines */
231 static bool dma32_alloc(dma_info_t *di, uint direction);
232 static bool dma32_txreset(dma_info_t *di);
233 static bool dma32_rxreset(dma_info_t *di);
234 static bool dma32_txsuspendedidle(dma_info_t *di);
235 static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
236 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
237 static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
238 static void dma32_txrotate(dma_info_t *di);
239 static bool dma32_rxidle(dma_info_t *di);
240 static void dma32_txinit(dma_info_t *di);
241 static bool dma32_txenabled(dma_info_t *di);
242 static void dma32_txsuspend(dma_info_t *di);
243 static void dma32_txresume(dma_info_t *di);
244 static bool dma32_txsuspended(dma_info_t *di);
245 static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
246 static bool dma32_txstopped(dma_info_t *di);
247 static bool dma32_rxstopped(dma_info_t *di);
248 static bool dma32_rxenabled(dma_info_t *di);
249
250 static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs);
251
252 /* Prototypes for 64-bit routines */
253 static bool dma64_alloc(dma_info_t *di, uint direction);
254 static bool dma64_txreset(dma_info_t *di);
255 static bool dma64_rxreset(dma_info_t *di);
256 static bool dma64_txsuspendedidle(dma_info_t *di);
257 static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
258 static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
259 static void *dma64_getpos(dma_info_t *di, bool direction);
260 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
261 static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
262 static void dma64_txrotate(dma_info_t *di);
263
264 static bool dma64_rxidle(dma_info_t *di);
265 static void dma64_txinit(dma_info_t *di);
266 static bool dma64_txenabled(dma_info_t *di);
267 static void dma64_txsuspend(dma_info_t *di);
268 static void dma64_txresume(dma_info_t *di);
269 static bool dma64_txsuspended(dma_info_t *di);
270 static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
271 static bool dma64_txstopped(dma_info_t *di);
272 static bool dma64_rxstopped(dma_info_t *di);
273 static bool dma64_rxenabled(dma_info_t *di);
274 static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs);
275
276 static inline u32 parity32(u32 data);
277
278 const di_fcn_t dma64proc = {
279 (di_detach_t) _dma_detach,
280 (di_txinit_t) dma64_txinit,
281 (di_txreset_t) dma64_txreset,
282 (di_txenabled_t) dma64_txenabled,
283 (di_txsuspend_t) dma64_txsuspend,
284 (di_txresume_t) dma64_txresume,
285 (di_txsuspended_t) dma64_txsuspended,
286 (di_txsuspendedidle_t) dma64_txsuspendedidle,
287 (di_txfast_t) dma64_txfast,
288 (di_txunframed_t) dma64_txunframed,
289 (di_getpos_t) dma64_getpos,
290 (di_txstopped_t) dma64_txstopped,
291 (di_txreclaim_t) dma64_txreclaim,
292 (di_getnexttxp_t) dma64_getnexttxp,
293 (di_peeknexttxp_t) _dma_peeknexttxp,
294 (di_txblock_t) _dma_txblock,
295 (di_txunblock_t) _dma_txunblock,
296 (di_txactive_t) _dma_txactive,
297 (di_txrotate_t) dma64_txrotate,
298
299 (di_rxinit_t) _dma_rxinit,
300 (di_rxreset_t) dma64_rxreset,
301 (di_rxidle_t) dma64_rxidle,
302 (di_rxstopped_t) dma64_rxstopped,
303 (di_rxenable_t) _dma_rxenable,
304 (di_rxenabled_t) dma64_rxenabled,
305 (di_rx_t) _dma_rx,
306 (di_rxfill_t) _dma_rxfill,
307 (di_rxreclaim_t) _dma_rxreclaim,
308 (di_getnextrxp_t) _dma_getnextrxp,
309 (di_peeknextrxp_t) _dma_peeknextrxp,
310 (di_rxparam_get_t) _dma_rx_param_get,
311
312 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
313 (di_getvar_t) _dma_getvar,
314 (di_counterreset_t) _dma_counterreset,
315 (di_ctrlflags_t) _dma_ctrlflags,
316 NULL,
317 NULL,
318 NULL,
319 (di_rxactive_t) _dma_rxactive,
320 (di_txpending_t) _dma_txpending,
321 (di_txcommitted_t) _dma_txcommitted,
322 39
323 };
324
325 static const di_fcn_t dma32proc = {
326 (di_detach_t) _dma_detach,
327 (di_txinit_t) dma32_txinit,
328 (di_txreset_t) dma32_txreset,
329 (di_txenabled_t) dma32_txenabled,
330 (di_txsuspend_t) dma32_txsuspend,
331 (di_txresume_t) dma32_txresume,
332 (di_txsuspended_t) dma32_txsuspended,
333 (di_txsuspendedidle_t) dma32_txsuspendedidle,
334 (di_txfast_t) dma32_txfast,
335 NULL,
336 NULL,
337 (di_txstopped_t) dma32_txstopped,
338 (di_txreclaim_t) dma32_txreclaim,
339 (di_getnexttxp_t) dma32_getnexttxp,
340 (di_peeknexttxp_t) _dma_peeknexttxp,
341 (di_txblock_t) _dma_txblock,
342 (di_txunblock_t) _dma_txunblock,
343 (di_txactive_t) _dma_txactive,
344 (di_txrotate_t) dma32_txrotate,
345
346 (di_rxinit_t) _dma_rxinit,
347 (di_rxreset_t) dma32_rxreset,
348 (di_rxidle_t) dma32_rxidle,
349 (di_rxstopped_t) dma32_rxstopped,
350 (di_rxenable_t) _dma_rxenable,
351 (di_rxenabled_t) dma32_rxenabled,
352 (di_rx_t) _dma_rx,
353 (di_rxfill_t) _dma_rxfill,
354 (di_rxreclaim_t) _dma_rxreclaim,
355 (di_getnextrxp_t) _dma_getnextrxp,
356 (di_peeknextrxp_t) _dma_peeknextrxp,
357 (di_rxparam_get_t) _dma_rx_param_get,
358
359 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
360 (di_getvar_t) _dma_getvar,
361 (di_counterreset_t) _dma_counterreset,
362 (di_ctrlflags_t) _dma_ctrlflags,
363 NULL,
364 NULL,
365 NULL,
366 (di_rxactive_t) _dma_rxactive,
367 (di_txpending_t) _dma_txpending,
368 (di_txcommitted_t) _dma_txcommitted,
369 39
370 };
371
372 hnddma_t *dma_attach(struct osl_info *osh, char *name, si_t *sih,
373 void *dmaregstx, void *dmaregsrx, uint ntxd,
374 uint nrxd, uint rxbufsize, int rxextheadroom,
375 uint nrxpost, uint rxoffset, uint *msg_level)
376 {
377 dma_info_t *di;
378 uint size;
379
380 /* allocate private info structure */
381 di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
382 if (di == NULL) {
383 #ifdef BCMDBG
384 printf("dma_attach: out of memory\n");
385 #endif
386 return NULL;
387 }
388
389 di->msg_level = msg_level ? msg_level : &dma_msg_level;
390
391 /* old chips w/o sb are no longer supported */
392 ASSERT(sih != NULL);
393
394 if (DMA64_ENAB(di))
395 di->dma64 =
396 ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
397 else
398 di->dma64 = 0;
399
400 /* check arguments */
401 ASSERT(ISPOWEROF2(ntxd));
402 ASSERT(ISPOWEROF2(nrxd));
403
404 if (nrxd == 0)
405 ASSERT(dmaregsrx == NULL);
406 if (ntxd == 0)
407 ASSERT(dmaregstx == NULL);
408
409 /* init dma reg pointer */
410 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
411 ASSERT(ntxd <= D64MAXDD);
412 ASSERT(nrxd <= D64MAXDD);
413 di->d64txregs = (dma64regs_t *) dmaregstx;
414 di->d64rxregs = (dma64regs_t *) dmaregsrx;
415 di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
416 } else if (DMA32_ENAB(di)) {
417 ASSERT(ntxd <= D32MAXDD);
418 ASSERT(nrxd <= D32MAXDD);
419 di->d32txregs = (dma32regs_t *) dmaregstx;
420 di->d32rxregs = (dma32regs_t *) dmaregsrx;
421 di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
422 } else {
423 DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
424 ASSERT(0);
425 goto fail;
426 }
427
428 /* Default flags (which can be changed by the driver calling dma_ctrlflags
429 * before enable): For backwards compatibility both Rx Overflow Continue
430 * and Parity are DISABLED; each can be enabled later if the hardware
431 * supports it.
432 */
433 di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
434 0);
435
436 DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
437
438 /* make a private copy of our callers name */
439 strncpy(di->name, name, MAXNAMEL);
440 di->name[MAXNAMEL - 1] = '\0';
441
442 di->osh = osh;
443 di->sih = sih;
444
445 /* save tunables */
446 di->ntxd = (u16) ntxd;
447 di->nrxd = (u16) nrxd;
448
449 /* the actual dma size doesn't include the extra headroom */
450 di->rxextrahdrroom =
451 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
452 if (rxbufsize > BCMEXTRAHDROOM)
453 di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
454 else
455 di->rxbufsize = (u16) rxbufsize;
456
457 di->nrxpost = (u16) nrxpost;
458 di->rxoffset = (u8) rxoffset;
459
460 /*
461 * figure out the DMA physical address offset for dd and data
462 * PCI/PCIE: they map the silicon backplane address to zero-based memory, need offset
463 * Other bus: use zero
464 * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
465 */
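/*
 * Illustration only (example addresses, not part of the driver logic):
 * the offsets chosen below are simply added when the hardware is
 * programmed, e.g. in _dma_ddtable_init():
 *
 *	W_REG(di->osh, &di->d64txregs->addrlow,
 *	      (PHYSADDRLO(pa) + di->ddoffsetlow));
 *
 * so in the pci/DMA32 case, where ddoffsetlow == SI_PCI_DMA, a ring at
 * physical 0x00100000 is programmed as SI_PCI_DMA + 0x00100000.
 */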
466 di->ddoffsetlow = 0;
467 di->dataoffsetlow = 0;
468 /* for pci bus, add offset */
469 if (sih->bustype == PCI_BUS) {
470 if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
471 /* pcie with DMA64 */
472 di->ddoffsetlow = 0;
473 di->ddoffsethigh = SI_PCIE_DMA_H32;
474 } else {
475 /* pci(DMA32/DMA64) or pcie with DMA32 */
476 di->ddoffsetlow = SI_PCI_DMA;
477 di->ddoffsethigh = 0;
478 }
479 di->dataoffsetlow = di->ddoffsetlow;
480 di->dataoffsethigh = di->ddoffsethigh;
481 }
482 #if defined(__mips__) && defined(IL_BIGENDIAN)
483 di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
484 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
485 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
486 if ((si_coreid(sih) == SDIOD_CORE_ID)
487 && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
488 di->addrext = 0;
489 else if ((si_coreid(sih) == I2S_CORE_ID) &&
490 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
491 di->addrext = 0;
492 else
493 di->addrext = _dma_isaddrext(di);
494
495 /* do the descriptors need to be aligned, and if so, on 4K/8K boundaries or not */
496 di->aligndesc_4k = _dma_descriptor_align(di);
497 if (di->aligndesc_4k) {
498 if (DMA64_MODE(di)) {
499 di->dmadesc_align = D64RINGALIGN_BITS;
500 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
501 /* for smaller dd table, HW relaxes the alignment requirement */
502 di->dmadesc_align = D64RINGALIGN_BITS - 1;
503 }
504 } else
505 di->dmadesc_align = D32RINGALIGN_BITS;
506 } else
507 di->dmadesc_align = 4; /* 16 byte alignment */
508
509 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
510 di->aligndesc_4k, di->dmadesc_align));
511
512 /* allocate tx packet pointer vector */
513 if (ntxd) {
514 size = ntxd * sizeof(void *);
515 di->txp = kzalloc(size, GFP_ATOMIC);
516 if (di->txp == NULL) {
517 DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
518 goto fail;
519 }
520 }
521
522 /* allocate rx packet pointer vector */
523 if (nrxd) {
524 size = nrxd * sizeof(void *);
525 di->rxp = kzalloc(size, GFP_ATOMIC);
526 if (di->rxp == NULL) {
527 DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
528 goto fail;
529 }
530 }
531
532 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
533 if (ntxd) {
534 if (!_dma_alloc(di, DMA_TX))
535 goto fail;
536 }
537
538 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
539 if (nrxd) {
540 if (!_dma_alloc(di, DMA_RX))
541 goto fail;
542 }
543
544 if ((di->ddoffsetlow != 0) && !di->addrext) {
545 if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
546 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
547 goto fail;
548 }
549 if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
550 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
551 goto fail;
552 }
553 }
554
555 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
556
557 /* allocate DMA mapping vectors */
558 if (DMASGLIST_ENAB) {
559 if (ntxd) {
560 size = ntxd * sizeof(hnddma_seg_map_t);
561 di->txp_dmah = kzalloc(size, GFP_ATOMIC);
562 if (di->txp_dmah == NULL)
563 goto fail;
564 }
565
566 if (nrxd) {
567 size = nrxd * sizeof(hnddma_seg_map_t);
568 di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
569 if (di->rxp_dmah == NULL)
570 goto fail;
571 }
572 }
573
574 return (hnddma_t *) di;
575
576 fail:
577 _dma_detach(di);
578 return NULL;
579 }
580
581 /* init the tx or rx descriptor */
582 static inline void
583 dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
584 u32 *flags, u32 bufcount)
585 {
586 /* dma32 uses 32-bit control to fit both flags and bufcounter */
587 *flags = *flags | (bufcount & CTRL_BC_MASK);
588
589 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
590 W_SM(&ddring[outidx].addr,
591 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
592 W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
593 } else {
594 /* address extension */
595 u32 ae;
596 ASSERT(di->addrext);
597 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
598 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
599
600 *flags |= (ae << CTRL_AE_SHIFT);
601 W_SM(&ddring[outidx].addr,
602 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
603 W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
604 }
605 }
606
607 /* Check for odd number of 1's */
608 static inline u32 parity32(u32 data)
609 {
610 data ^= data >> 16;
611 data ^= data >> 8;
612 data ^= data >> 4;
613 data ^= data >> 2;
614 data ^= data >> 1;
615
616 return data & 1;
617 }
618
619 #define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
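/*
 * Worked example of the parity fold above (illustrative only): for
 * data == 0x6, which has two bits set (even parity),
 *	after data ^= data >> 16;	data == 0x6
 *	after data ^= data >> 8;	data == 0x6
 *	after data ^= data >> 4;	data == 0x6
 *	after data ^= data >> 2;	data == 0x7
 *	after data ^= data >> 1;	data == 0x4
 * so data & 1 == 0, and dma64_dd_upd() below only sets D64_CTRL2_PARITY
 * (when parity is enabled) if this fold returns 1 for the descriptor words.
 */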
620
621 static inline void
622 dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
623 u32 *flags, u32 bufcount)
624 {
625 u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
626
627 /* PCI bus with a big (>1 GB) physical address, use address extension */
628 #if defined(__mips__) && defined(IL_BIGENDIAN)
629 if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
630 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
631 #else
632 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
633 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
634 ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);
635
636 W_SM(&ddring[outidx].addrlow,
637 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
638 W_SM(&ddring[outidx].addrhigh,
639 BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
640 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
641 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
642 } else {
643 /* address extension for 32-bit PCI */
644 u32 ae;
645 ASSERT(di->addrext);
646
647 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
648 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
649 ASSERT(PHYSADDRHI(pa) == 0);
650
651 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
652 W_SM(&ddring[outidx].addrlow,
653 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
654 W_SM(&ddring[outidx].addrhigh,
655 BUS_SWAP32(0 + di->dataoffsethigh));
656 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
657 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
658 }
659 if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
660 if (DMA64_DD_PARITY(&ddring[outidx])) {
661 W_SM(&ddring[outidx].ctrl2,
662 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
663 }
664 }
665 }
666
667 static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs)
668 {
669 u32 w;
670
671 OR_REG(osh, &dma32regs->control, XC_AE);
672 w = R_REG(osh, &dma32regs->control);
673 AND_REG(osh, &dma32regs->control, ~XC_AE);
674 return (w & XC_AE) == XC_AE;
675 }
676
677 static bool _dma_alloc(dma_info_t *di, uint direction)
678 {
679 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
680 return dma64_alloc(di, direction);
681 } else if (DMA32_ENAB(di)) {
682 return dma32_alloc(di, direction);
683 } else
684 ASSERT(0);
685 }
686
687 /* !! may be called with core in reset */
688 static void _dma_detach(dma_info_t *di)
689 {
690
691 DMA_TRACE(("%s: dma_detach\n", di->name));
692
693 /* shouldn't be here if descriptors are unreclaimed */
694 ASSERT(di->txin == di->txout);
695 ASSERT(di->rxin == di->rxout);
696
697 /* free dma descriptor rings */
698 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
699 if (di->txd64)
700 DMA_FREE_CONSISTENT(di->osh,
701 ((s8 *)di->txd64 -
702 di->txdalign), di->txdalloc,
703 (di->txdpaorig), &di->tx_dmah);
704 if (di->rxd64)
705 DMA_FREE_CONSISTENT(di->osh,
706 ((s8 *)di->rxd64 -
707 di->rxdalign), di->rxdalloc,
708 (di->rxdpaorig), &di->rx_dmah);
709 } else if (DMA32_ENAB(di)) {
710 if (di->txd32)
711 DMA_FREE_CONSISTENT(di->osh,
712 ((s8 *)di->txd32 -
713 di->txdalign), di->txdalloc,
714 (di->txdpaorig), &di->tx_dmah);
715 if (di->rxd32)
716 DMA_FREE_CONSISTENT(di->osh,
717 ((s8 *)di->rxd32 -
718 di->rxdalign), di->rxdalloc,
719 (di->rxdpaorig), &di->rx_dmah);
720 } else
721 ASSERT(0);
722
723 /* free packet pointer vectors */
724 if (di->txp)
725 kfree((void *)di->txp);
726 if (di->rxp)
727 kfree((void *)di->rxp);
728
729 /* free tx packet DMA handles */
730 if (di->txp_dmah)
731 kfree(di->txp_dmah);
732
733 /* free rx packet DMA handles */
734 if (di->rxp_dmah)
735 kfree(di->rxp_dmah);
736
737 /* free our private info structure */
738 kfree((void *)di);
739
740 }
741
742 static bool _dma_descriptor_align(dma_info_t *di)
743 {
744 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
745 u32 addrl;
746
747 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
748 if (di->d64txregs != NULL) {
749 W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
750 addrl = R_REG(di->osh, &di->d64txregs->addrlow);
751 if (addrl != 0)
752 return false;
753 } else if (di->d64rxregs != NULL) {
754 W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
755 addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
756 if (addrl != 0)
757 return false;
758 }
759 }
760 return true;
761 }
762
763 /* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
764 static bool _dma_isaddrext(dma_info_t *di)
765 {
766 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
767 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
768
769 /* not all tx or rx channels are available */
770 if (di->d64txregs != NULL) {
771 if (!_dma64_addrext(di->osh, di->d64txregs)) {
772 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
773 ASSERT(0);
774 }
775 return true;
776 } else if (di->d64rxregs != NULL) {
777 if (!_dma64_addrext(di->osh, di->d64rxregs)) {
778 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
779 ASSERT(0);
780 }
781 return true;
782 }
783 return false;
784 } else if (DMA32_ENAB(di)) {
785 if (di->d32txregs)
786 return _dma32_addrext(di->osh, di->d32txregs);
787 else if (di->d32rxregs)
788 return _dma32_addrext(di->osh, di->d32rxregs);
789 } else
790 ASSERT(0);
791
792 return false;
793 }
794
795 /* initialize descriptor table base address */
796 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
797 {
798 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
799 if (!di->aligndesc_4k) {
800 if (direction == DMA_TX)
801 di->xmtptrbase = PHYSADDRLO(pa);
802 else
803 di->rcvptrbase = PHYSADDRLO(pa);
804 }
805
806 if ((di->ddoffsetlow == 0)
807 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
808 if (direction == DMA_TX) {
809 W_REG(di->osh, &di->d64txregs->addrlow,
810 (PHYSADDRLO(pa) + di->ddoffsetlow));
811 W_REG(di->osh, &di->d64txregs->addrhigh,
812 (PHYSADDRHI(pa) + di->ddoffsethigh));
813 } else {
814 W_REG(di->osh, &di->d64rxregs->addrlow,
815 (PHYSADDRLO(pa) + di->ddoffsetlow));
816 W_REG(di->osh, &di->d64rxregs->addrhigh,
817 (PHYSADDRHI(pa) + di->ddoffsethigh));
818 }
819 } else {
820 /* DMA64 32bits address extension */
821 u32 ae;
822 ASSERT(di->addrext);
823 ASSERT(PHYSADDRHI(pa) == 0);
824
825 /* shift the high bit(s) from pa to ae */
826 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
827 PCI32ADDR_HIGH_SHIFT;
828 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
829
830 if (direction == DMA_TX) {
831 W_REG(di->osh, &di->d64txregs->addrlow,
832 (PHYSADDRLO(pa) + di->ddoffsetlow));
833 W_REG(di->osh, &di->d64txregs->addrhigh,
834 di->ddoffsethigh);
835 SET_REG(di->osh, &di->d64txregs->control,
836 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
837 } else {
838 W_REG(di->osh, &di->d64rxregs->addrlow,
839 (PHYSADDRLO(pa) + di->ddoffsetlow));
840 W_REG(di->osh, &di->d64rxregs->addrhigh,
841 di->ddoffsethigh);
842 SET_REG(di->osh, &di->d64rxregs->control,
843 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
844 }
845 }
846
847 } else if (DMA32_ENAB(di)) {
848 ASSERT(PHYSADDRHI(pa) == 0);
849 if ((di->ddoffsetlow == 0)
850 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
851 if (direction == DMA_TX)
852 W_REG(di->osh, &di->d32txregs->addr,
853 (PHYSADDRLO(pa) + di->ddoffsetlow));
854 else
855 W_REG(di->osh, &di->d32rxregs->addr,
856 (PHYSADDRLO(pa) + di->ddoffsetlow));
857 } else {
858 /* dma32 address extension */
859 u32 ae;
860 ASSERT(di->addrext);
861
862 /* shift the high bit(s) from pa to ae */
863 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
864 PCI32ADDR_HIGH_SHIFT;
865 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
866
867 if (direction == DMA_TX) {
868 W_REG(di->osh, &di->d32txregs->addr,
869 (PHYSADDRLO(pa) + di->ddoffsetlow));
870 SET_REG(di->osh, &di->d32txregs->control, XC_AE,
871 ae << XC_AE_SHIFT);
872 } else {
873 W_REG(di->osh, &di->d32rxregs->addr,
874 (PHYSADDRLO(pa) + di->ddoffsetlow));
875 SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
876 ae << RC_AE_SHIFT);
877 }
878 }
879 } else
880 ASSERT(0);
881 }
882
883 static void _dma_fifoloopbackenable(dma_info_t *di)
884 {
885 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
886
887 if (DMA64_ENAB(di) && DMA64_MODE(di))
888 OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
889 else if (DMA32_ENAB(di))
890 OR_REG(di->osh, &di->d32txregs->control, XC_LE);
891 else
892 ASSERT(0);
893 }
894
895 static void _dma_rxinit(dma_info_t *di)
896 {
897 DMA_TRACE(("%s: dma_rxinit\n", di->name));
898
899 if (di->nrxd == 0)
900 return;
901
902 di->rxin = di->rxout = 0;
903
904 /* clear rx descriptor ring */
905 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
906 BZERO_SM((void *)di->rxd64,
907 (di->nrxd * sizeof(dma64dd_t)));
908
909 /* DMA engine without alignment requirement requires table to be inited
910 * before enabling the engine
911 */
912 if (!di->aligndesc_4k)
913 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
914
915 _dma_rxenable(di);
916
917 if (di->aligndesc_4k)
918 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
919 } else if (DMA32_ENAB(di)) {
920 BZERO_SM((void *)di->rxd32,
921 (di->nrxd * sizeof(dma32dd_t)));
922 _dma_rxenable(di);
923 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
924 } else
925 ASSERT(0);
926 }
927
928 static void _dma_rxenable(dma_info_t *di)
929 {
930 uint dmactrlflags = di->hnddma.dmactrlflags;
931
932 DMA_TRACE(("%s: dma_rxenable\n", di->name));
933
934 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
935 u32 control =
936 (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
937 D64_RC_RE;
938
939 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
940 control |= D64_RC_PD;
941
942 if (dmactrlflags & DMA_CTRL_ROC)
943 control |= D64_RC_OC;
944
945 W_REG(di->osh, &di->d64rxregs->control,
946 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
947 } else if (DMA32_ENAB(di)) {
948 u32 control =
949 (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;
950
951 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
952 control |= RC_PD;
953
954 if (dmactrlflags & DMA_CTRL_ROC)
955 control |= RC_OC;
956
957 W_REG(di->osh, &di->d32rxregs->control,
958 ((di->rxoffset << RC_RO_SHIFT) | control));
959 } else
960 ASSERT(0);
961 }
962
963 static void
964 _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
965 {
966 /* the normal values fit into 16 bits */
967 *rxoffset = (u16) di->rxoffset;
968 *rxbufsize = (u16) di->rxbufsize;
969 }
970
971 /* !! rx entry routine
972 * returns a pointer to the next frame received, or NULL if there are no more
973 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported
974 * as a packet chain
975 * otherwise, a scattered frame is treated as a giant packet and will be tossed.
976 * The DMA scattering starts with a normal DMA header, followed by the first buffer's data.
977 * When a buffer's maximum size is reached, the data continues in the next DMA descriptor's
978 * buffer WITHOUT a DMA header
979 */
980 static void *BCMFASTPATH _dma_rx(dma_info_t *di)
981 {
982 struct sk_buff *p, *head, *tail;
983 uint len;
984 uint pkt_len;
985 int resid = 0;
986
987 next_frame:
988 head = _dma_getnextrxp(di, false);
989 if (head == NULL)
990 return NULL;
991
992 len = ltoh16(*(u16 *) (head->data));
993 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
994
995 #if defined(__mips__)
996 if (!len) {
997 while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
998 udelay(1);
999
1000 *(u16 *) (head->data) = htol16((u16) len);
1001 }
1002 #endif /* defined(__mips__) */
1003
1004 /* set actual length */
1005 pkt_len = min((di->rxoffset + len), di->rxbufsize);
1006 __skb_trim(head, pkt_len);
1007 resid = len - (di->rxbufsize - di->rxoffset);
1008
1009 /* check for single or multi-buffer rx */
1010 if (resid > 0) {
1011 tail = head;
1012 while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
1013 tail->next = p;
1014 pkt_len = min(resid, (int)di->rxbufsize);
1015 __skb_trim(p, pkt_len);
1016
1017 tail = p;
1018 resid -= di->rxbufsize;
1019 }
1020
1021 #ifdef BCMDBG
1022 if (resid > 0) {
1023 uint cur;
1024 ASSERT(p == NULL);
1025 cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
1026 B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
1027 D64_RS0_CD_MASK) -
1028 di->rcvptrbase) & D64_RS0_CD_MASK,
1029 dma64dd_t) : B2I(R_REG(di->osh,
1030 &di->d32rxregs->
1031 status) & RS_CD_MASK,
1032 dma32dd_t);
1033 DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
1034 di->rxin, di->rxout, cur));
1035 }
1036 #endif /* BCMDBG */
1037
1038 if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
1039 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
1040 di->name, len));
1041 PKTFREE(di->osh, head, false);
1042 di->hnddma.rxgiants++;
1043 goto next_frame;
1044 }
1045 }
1046
1047 return head;
1048 }
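/*
 * Worked example of the split above (illustrative numbers only): with
 * rxbufsize == 256, rxoffset == 8 and a reported frame length len == 500,
 * the head buffer is trimmed to min(8 + 500, 256) == 256 bytes and
 * resid == 500 - (256 - 8) == 252, so one more buffer is chained and
 * trimmed to 252 bytes; the 248 + 252 payload bytes add up to len.
 */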
1049
1050 /* post receive buffers
1051 * return true if refill failed completely and the ring is empty
1052 * this will stall the rx dma and the user might want to call rxfill again asap
1053 * This is unlikely on a memory-rich NIC, but common on a memory-constrained dongle
1054 */
1055 static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
1056 {
1057 struct sk_buff *p;
1058 u16 rxin, rxout;
1059 u32 flags = 0;
1060 uint n;
1061 uint i;
1062 dmaaddr_t pa;
1063 uint extra_offset = 0;
1064 bool ring_empty;
1065
1066 ring_empty = false;
1067
1068 /*
1069 * Determine how many receive buffers we're lacking
1070 * from the full complement, allocate, initialize,
1071 * and post them, then update the chip rx lastdscr.
1072 */
1073
1074 rxin = di->rxin;
1075 rxout = di->rxout;
1076
1077 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
1078
1079 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
1080
1081 if (di->rxbufsize > BCMEXTRAHDROOM)
1082 extra_offset = di->rxextrahdrroom;
1083
1084 for (i = 0; i < n; i++) {
1085 /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
1086 size to be allocated
1087 */
1088
1089 p = osl_pktget(di->osh, di->rxbufsize + extra_offset);
1090
1091 if (p == NULL) {
1092 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
1093 di->name));
1094 if (i == 0) {
1095 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1096 if (dma64_rxidle(di)) {
1097 DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
1098 ring_empty = true;
1099 }
1100 } else if (DMA32_ENAB(di)) {
1101 if (dma32_rxidle(di)) {
1102 DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
1103 ring_empty = true;
1104 }
1105 } else
1106 ASSERT(0);
1107 }
1108 di->hnddma.rxnobuf++;
1109 break;
1110 }
1111 /* reserve an extra headroom, if applicable */
1112 if (extra_offset)
1113 skb_pull(p, extra_offset);
1114
1115 /* Do a cached write instead of uncached write since DMA_MAP
1116 * will flush the cache.
1117 */
1118 *(u32 *) (p->data) = 0;
1119
1120 if (DMASGLIST_ENAB)
1121 bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));
1122
1123 pa = DMA_MAP(di->osh, p->data,
1124 di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
1125
1126 ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));
1127
1128 /* save the free packet pointer */
1129 ASSERT(di->rxp[rxout] == NULL);
1130 di->rxp[rxout] = p;
1131
1132 /* reset flags for each descriptor */
1133 flags = 0;
1134 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1135 if (rxout == (di->nrxd - 1))
1136 flags = D64_CTRL1_EOT;
1137
1138 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
1139 di->rxbufsize);
1140 } else if (DMA32_ENAB(di)) {
1141 if (rxout == (di->nrxd - 1))
1142 flags = CTRL_EOT;
1143
1144 ASSERT(PHYSADDRHI(pa) == 0);
1145 dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
1146 di->rxbufsize);
1147 } else
1148 ASSERT(0);
1149 rxout = NEXTRXD(rxout);
1150 }
1151
1152 di->rxout = rxout;
1153
1154 /* update the chip lastdscr pointer */
1155 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1156 W_REG(di->osh, &di->d64rxregs->ptr,
1157 di->rcvptrbase + I2B(rxout, dma64dd_t));
1158 } else if (DMA32_ENAB(di)) {
1159 W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
1160 } else
1161 ASSERT(0);
1162
1163 return ring_empty;
1164 }
1165
1166 /* like getnexttxp but no reclaim */
1167 static void *_dma_peeknexttxp(dma_info_t *di)
1168 {
1169 uint end, i;
1170
1171 if (di->ntxd == 0)
1172 return NULL;
1173
1174 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1175 end =
1176 B2I(((R_REG(di->osh, &di->d64txregs->status0) &
1177 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1178 dma64dd_t);
1179 } else if (DMA32_ENAB(di)) {
1180 end =
1181 B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
1182 dma32dd_t);
1183 } else
1184 ASSERT(0);
1185
1186 for (i = di->txin; i != end; i = NEXTTXD(i))
1187 if (di->txp[i])
1188 return di->txp[i];
1189
1190 return NULL;
1191 }
1192
1193 /* like getnextrxp but does not take the packet off the ring */
1194 static void *_dma_peeknextrxp(dma_info_t *di)
1195 {
1196 uint end, i;
1197
1198 if (di->nrxd == 0)
1199 return NULL;
1200
1201 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1202 end =
1203 B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
1204 D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
1205 dma64dd_t);
1206 } else if (DMA32_ENAB(di)) {
1207 end =
1208 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
1209 dma32dd_t);
1210 } else
1211 ASSERT(0);
1212
1213 for (i = di->rxin; i != end; i = NEXTRXD(i))
1214 if (di->rxp[i])
1215 return di->rxp[i];
1216
1217 return NULL;
1218 }
1219
1220 static void _dma_rxreclaim(dma_info_t *di)
1221 {
1222 void *p;
1223
1224 /* "unused local" warning suppression for OSLs that
1225 * define PKTFREE() without using the di->osh arg
1226 */
1227 di = di;
1228
1229 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
1230
1231 while ((p = _dma_getnextrxp(di, true)))
1232 PKTFREE(di->osh, p, false);
1233 }
1234
1235 static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
1236 {
1237 if (di->nrxd == 0)
1238 return NULL;
1239
1240 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1241 return dma64_getnextrxp(di, forceall);
1242 } else if (DMA32_ENAB(di)) {
1243 return dma32_getnextrxp(di, forceall);
1244 } else
1245 ASSERT(0);
1246 }
1247
1248 static void _dma_txblock(dma_info_t *di)
1249 {
1250 di->hnddma.txavail = 0;
1251 }
1252
1253 static void _dma_txunblock(dma_info_t *di)
1254 {
1255 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1256 }
1257
1258 static uint _dma_txactive(dma_info_t *di)
1259 {
1260 return NTXDACTIVE(di->txin, di->txout);
1261 }
1262
1263 static uint _dma_txpending(dma_info_t *di)
1264 {
1265 uint curr;
1266
1267 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1268 curr =
1269 B2I(((R_REG(di->osh, &di->d64txregs->status0) &
1270 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
1271 dma64dd_t);
1272 } else if (DMA32_ENAB(di)) {
1273 curr =
1274 B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
1275 dma32dd_t);
1276 } else
1277 ASSERT(0);
1278
1279 return NTXDACTIVE(curr, di->txout);
1280 }
1281
1282 static uint _dma_txcommitted(dma_info_t *di)
1283 {
1284 uint ptr;
1285 uint txin = di->txin;
1286
1287 if (txin == di->txout)
1288 return 0;
1289
1290 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1291 ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
1292 } else if (DMA32_ENAB(di)) {
1293 ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
1294 } else
1295 ASSERT(0);
1296
1297 return NTXDACTIVE(di->txin, ptr);
1298 }
1299
1300 static uint _dma_rxactive(dma_info_t *di)
1301 {
1302 return NRXDACTIVE(di->rxin, di->rxout);
1303 }
1304
1305 static void _dma_counterreset(dma_info_t *di)
1306 {
1307 /* reset all software counters */
1308 di->hnddma.rxgiants = 0;
1309 di->hnddma.rxnobuf = 0;
1310 di->hnddma.txnobuf = 0;
1311 }
1312
1313 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
1314 {
1315 uint dmactrlflags;
1316
1317 if (di == NULL) {
1318 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
1319 return 0;
1320 }
1321 dmactrlflags = di->hnddma.dmactrlflags;
1322 ASSERT((flags & ~mask) == 0);
1323
1324 dmactrlflags &= ~mask;
1325 dmactrlflags |= flags;
1326
1327 /* If trying to enable parity, check if parity is actually supported */
1328 if (dmactrlflags & DMA_CTRL_PEN) {
1329 u32 control;
1330
1331 if (DMA64_ENAB(di) && DMA64_MODE(di)) {
1332 control = R_REG(di->osh, &di->d64txregs->control);
1333 W_REG(di->osh, &di->d64txregs->control,
1334 control | D64_XC_PD);
1335 if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
1336 /* We *can* disable it so it is supported,
1337 * restore control register
1338 */
1339 W_REG(di->osh, &di->d64txregs->control,
1340 control);
1341 } else {
1342 /* Not supported, don't allow it to be enabled */
1343 dmactrlflags &= ~DMA_CTRL_PEN;
1344 }
1345 } else if (DMA32_ENAB(di)) {
1346 control = R_REG(di->osh, &di->d32txregs->control);
1347 W_REG(di->osh, &di->d32txregs->control,
1348 control | XC_PD);
1349 if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
1350 W_REG(di->osh, &di->d32txregs->control,
1351 control);
1352 } else {
1353 /* Not supported, don't allow it to be enabled */
1354 dmactrlflags &= ~DMA_CTRL_PEN;
1355 }
1356 } else
1357 ASSERT(0);
1358 }
1359
1360 di->hnddma.dmactrlflags = dmactrlflags;
1361
1362 return dmactrlflags;
1363 }
1364
1365 /* get the address of the var in order to change it later */
1366 static unsigned long _dma_getvar(dma_info_t *di, const char *name)
1367 {
1368 if (!strcmp(name, "&txavail"))
1369 return (unsigned long)&(di->hnddma.txavail);
1370 else {
1371 ASSERT(0);
1372 }
1373 return 0;
1374 }
1375
1376 void dma_txpioloopback(struct osl_info *osh, dma32regs_t *regs)
1377 {
1378 OR_REG(osh, &regs->control, XC_LE);
1379 }
1380
1381 static
1382 u8 dma_align_sizetobits(uint size)
1383 {
1384 u8 bitpos = 0;
1385 ASSERT(size);
1386 ASSERT(!(size & (size - 1)));
1387 while (size >>= 1) {
1388 bitpos++;
1389 }
1390 return bitpos;
1391 }
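/*
 * Usage sketch (illustrative only): dma_align_sizetobits(4096) shifts the
 * single set bit down and returns 12, i.e. the alignment expressed as a
 * power of two, which is the form dma_ringalloc() below passes to
 * DMA_ALLOC_CONSISTENT() via *alignbits.
 */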
1392
1393 /* This function ensures that the DMA descriptor ring will not get allocated
1394 * across a page boundary. If the first allocation does cross a page boundary,
1395 * it is freed and the allocation is redone at a location aligned to the
1396 * descriptor ring size, which ensures that the ring cannot cross a page
1397 * boundary
1398 */
1399 static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
1400 u16 *alignbits, uint *alloced,
1401 dmaaddr_t *descpa, osldma_t **dmah)
1402 {
1403 void *va;
1404 u32 desc_strtaddr;
1405 u32 alignbytes = 1 << *alignbits;
1406
1407 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
1408 dmah);
1409 if (NULL == va)
1410 return NULL;
1411
1412 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
1413 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1414 & boundary)) {
1415 *alignbits = dma_align_sizetobits(size);
1416 DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
1417 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
1418 descpa, dmah);
1419 }
1420 return va;
1421 }
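/*
 * Worked example of the boundary test above (all values illustrative):
 * with boundary == 0x1000, size == 0x800 and an aligned start address of
 * 0x1c00, the ring would span 0x1c00..0x23ff and
 *	(0x23ff & 0x1000) != (0x1c00 & 0x1000)
 * so the first allocation is freed and retried with
 * *alignbits == dma_align_sizetobits(0x800) == 11; a 2 KB ring that
 * starts on a 2 KB boundary can no longer straddle that 4 KB boundary.
 */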
1422
1423 /* 32-bit DMA functions */
1424
1425 static void dma32_txinit(dma_info_t *di)
1426 {
1427 u32 control = XC_XE;
1428
1429 DMA_TRACE(("%s: dma_txinit\n", di->name));
1430
1431 if (di->ntxd == 0)
1432 return;
1433
1434 di->txin = di->txout = 0;
1435 di->hnddma.txavail = di->ntxd - 1;
1436
1437 /* clear tx descriptor ring */
1438 BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
1439
1440 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1441 control |= XC_PD;
1442 W_REG(di->osh, &di->d32txregs->control, control);
1443 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1444 }
1445
1446 static bool dma32_txenabled(dma_info_t *di)
1447 {
1448 u32 xc;
1449
1450 /* If the chip is dead, it is not enabled :-) */
1451 xc = R_REG(di->osh, &di->d32txregs->control);
1452 return (xc != 0xffffffff) && (xc & XC_XE);
1453 }
1454
1455 static void dma32_txsuspend(dma_info_t *di)
1456 {
1457 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1458
1459 if (di->ntxd == 0)
1460 return;
1461
1462 OR_REG(di->osh, &di->d32txregs->control, XC_SE);
1463 }
1464
1465 static void dma32_txresume(dma_info_t *di)
1466 {
1467 DMA_TRACE(("%s: dma_txresume\n", di->name));
1468
1469 if (di->ntxd == 0)
1470 return;
1471
1472 AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
1473 }
1474
1475 static bool dma32_txsuspended(dma_info_t *di)
1476 {
1477 return (di->ntxd == 0)
1478 || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
1479 }
1480
1481 static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
1482 {
1483 void *p;
1484
1485 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1486 (range == HNDDMA_RANGE_ALL) ? "all" :
1487 ((range ==
1488 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1489 "transfered")));
1490
1491 if (di->txin == di->txout)
1492 return;
1493
1494 while ((p = dma32_getnexttxp(di, range)))
1495 PKTFREE(di->osh, p, true);
1496 }
1497
1498 static bool dma32_txstopped(dma_info_t *di)
1499 {
1500 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1501 XS_XS_STOPPED);
1502 }
1503
1504 static bool dma32_rxstopped(dma_info_t *di)
1505 {
1506 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
1507 RS_RS_STOPPED);
1508 }
1509
1510 static bool dma32_alloc(dma_info_t *di, uint direction)
1511 {
1512 uint size;
1513 uint ddlen;
1514 void *va;
1515 uint alloced;
1516 u16 align;
1517 u16 align_bits;
1518
1519 ddlen = sizeof(dma32dd_t);
1520
1521 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1522
1523 alloced = 0;
1524 align_bits = di->dmadesc_align;
1525 align = (1 << align_bits);
1526
1527 if (direction == DMA_TX) {
1528 va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1529 &alloced, &di->txdpaorig, &di->tx_dmah);
1530 if (va == NULL) {
1531 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
1532 return false;
1533 }
1534
1535 PHYSADDRHISET(di->txdpa, 0);
1536 ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
1537 di->txd32 = (dma32dd_t *) roundup((unsigned long)va, align);
1538 di->txdalign =
1539 (uint) ((s8 *)di->txd32 - (s8 *) va);
1540
1541 PHYSADDRLOSET(di->txdpa,
1542 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1543 /* Make sure that alignment didn't overflow */
1544 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
1545
1546 di->txdalloc = alloced;
1547 ASSERT(IS_ALIGNED((unsigned long)di->txd32, align));
1548 } else {
1549 va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1550 &alloced, &di->rxdpaorig, &di->rx_dmah);
1551 if (va == NULL) {
1552 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
1553 return false;
1554 }
1555
1556 PHYSADDRHISET(di->rxdpa, 0);
1557 ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
1558 di->rxd32 = (dma32dd_t *) roundup((unsigned long)va, align);
1559 di->rxdalign =
1560 (uint) ((s8 *)di->rxd32 - (s8 *) va);
1561
1562 PHYSADDRLOSET(di->rxdpa,
1563 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1564 /* Make sure that alignment didn't overflow */
1565 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
1566 di->rxdalloc = alloced;
1567 ASSERT(IS_ALIGNED((unsigned long)di->rxd32, align));
1568 }
1569
1570 return true;
1571 }
1572
1573 static bool dma32_txreset(dma_info_t *di)
1574 {
1575 u32 status;
1576
1577 if (di->ntxd == 0)
1578 return true;
1579
1580 /* suspend tx DMA first */
1581 W_REG(di->osh, &di->d32txregs->control, XC_SE);
1582 SPINWAIT(((status =
1583 (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
1584 != XS_XS_DISABLED) && (status != XS_XS_IDLE)
1585 && (status != XS_XS_STOPPED), (10000));
1586
1587 W_REG(di->osh, &di->d32txregs->control, 0);
1588 SPINWAIT(((status = (R_REG(di->osh,
1589 &di->d32txregs->status) & XS_XS_MASK)) !=
1590 XS_XS_DISABLED), 10000);
1591
1592 /* wait for the last transaction to complete */
1593 udelay(300);
1594
1595 return status == XS_XS_DISABLED;
1596 }
1597
1598 static bool dma32_rxidle(dma_info_t *di)
1599 {
1600 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1601
1602 if (di->nrxd == 0)
1603 return true;
1604
1605 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
1606 R_REG(di->osh, &di->d32rxregs->ptr));
1607 }
1608
1609 static bool dma32_rxreset(dma_info_t *di)
1610 {
1611 u32 status;
1612
1613 if (di->nrxd == 0)
1614 return true;
1615
1616 W_REG(di->osh, &di->d32rxregs->control, 0);
1617 SPINWAIT(((status = (R_REG(di->osh,
1618 &di->d32rxregs->status) & RS_RS_MASK)) !=
1619 RS_RS_DISABLED), 10000);
1620
1621 return status == RS_RS_DISABLED;
1622 }
1623
1624 static bool dma32_rxenabled(dma_info_t *di)
1625 {
1626 u32 rc;
1627
1628 rc = R_REG(di->osh, &di->d32rxregs->control);
1629 return (rc != 0xffffffff) && (rc & RC_RE);
1630 }
1631
1632 static bool dma32_txsuspendedidle(dma_info_t *di)
1633 {
1634 if (di->ntxd == 0)
1635 return true;
1636
1637 if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
1638 return 0;
1639
1640 if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
1641 return 0;
1642
1643 udelay(2);
1644 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1645 XS_XS_IDLE);
1646 }
1647
1648 /* !! tx entry routine
1649 * supports full 32bit dma engine buffer addressing so
1650 * dma buffers can cross 4 Kbyte page boundaries.
1651 *
1652 * WARNING: the caller must check the return value for errors.
1653 * an error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
1654 */
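/*
 * Flag sketch for a two-buffer chain posted at the end of an 8-entry ring
 * (illustrative only, no DMA segment list): with txout == 6 and
 * di->ntxd == 8, descriptor 6 gets CTRL_SOF, descriptor 7 gets
 * CTRL_IOC | CTRL_EOF and, being the last slot in the ring, CTRL_EOT as
 * well, after which txout wraps to 0.
 */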
1655 static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit)
1656 {
1657 struct sk_buff *p, *next;
1658 unsigned char *data;
1659 uint len;
1660 u16 txout;
1661 u32 flags = 0;
1662 dmaaddr_t pa;
1663
1664 DMA_TRACE(("%s: dma_txfast\n", di->name));
1665
1666 txout = di->txout;
1667
1668 /*
1669 * Walk the chain of packet buffers
1670 * allocating and initializing transmit descriptor entries.
1671 */
1672 for (p = p0; p; p = next) {
1673 uint nsegs, j;
1674 hnddma_seg_map_t *map;
1675
1676 data = p->data;
1677 len = p->len;
1678 #ifdef BCM_DMAPAD
1679 len += PKTDMAPAD(di->osh, p);
1680 #endif
1681 next = p->next;
1682
1683 /* return nonzero if out of tx descriptors */
1684 if (NEXTTXD(txout) == di->txin)
1685 goto outoftxd;
1686
1687 if (len == 0)
1688 continue;
1689
1690 if (DMASGLIST_ENAB)
1691 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
1692
1693 /* get physical address of buffer start */
1694 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
1695 &di->txp_dmah[txout]);
1696
1697 if (DMASGLIST_ENAB) {
1698 map = &di->txp_dmah[txout];
1699
1700 /* See if all the segments can be accounted for */
1701 if (map->nsegs >
1702 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1703 1))
1704 goto outoftxd;
1705
1706 nsegs = map->nsegs;
1707 } else
1708 nsegs = 1;
1709
1710 for (j = 1; j <= nsegs; j++) {
1711 flags = 0;
1712 if (p == p0 && j == 1)
1713 flags |= CTRL_SOF;
1714
1715 /* With a DMA segment list, Descriptor table is filled
1716 * using the segment list instead of looping over
1717 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
1718 * end of segment list is reached.
1719 */
1720 if ((!DMASGLIST_ENAB && next == NULL) ||
1721 (DMASGLIST_ENAB && j == nsegs))
1722 flags |= (CTRL_IOC | CTRL_EOF);
1723 if (txout == (di->ntxd - 1))
1724 flags |= CTRL_EOT;
1725
1726 if (DMASGLIST_ENAB) {
1727 len = map->segs[j - 1].length;
1728 pa = map->segs[j - 1].addr;
1729 }
1730 ASSERT(PHYSADDRHI(pa) == 0);
1731
1732 dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
1733 ASSERT(di->txp[txout] == NULL);
1734
1735 txout = NEXTTXD(txout);
1736 }
1737
1738 /* See above. No need to loop over individual buffers */
1739 if (DMASGLIST_ENAB)
1740 break;
1741 }
1742
1743 /* if last txd eof not set, fix it */
1744 if (!(flags & CTRL_EOF))
1745 W_SM(&di->txd32[PREVTXD(txout)].ctrl,
1746 BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));
1747
1748 /* save the packet */
1749 di->txp[PREVTXD(txout)] = p0;
1750
1751 /* bump the tx descriptor index */
1752 di->txout = txout;
1753
1754 /* kick the chip */
1755 if (commit)
1756 W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));
1757
1758 /* tx flow control */
1759 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1760
1761 return 0;
1762
1763 outoftxd:
1764 DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
1765 PKTFREE(di->osh, p0, true);
1766 di->hnddma.txavail = 0;
1767 di->hnddma.txnobuf++;
1768 return -1;
1769 }
1770
1771 /*
1772 * Reclaim next completed txd (txds if using chained buffers) in the range
1773 * specified and return associated packet.
1774 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1775 * transmitted as noted by the hardware "CurrDescr" pointer.
1776 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
1777 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1778 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1779 * return associated packet regardless of the value of hardware pointers.
1780 */
1781 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
1782 {
1783 u16 start, end, i;
1784 u16 active_desc;
1785 void *txp;
1786
1787 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1788 (range == HNDDMA_RANGE_ALL) ? "all" :
1789 ((range ==
1790 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1791 "transfered")));
1792
1793 if (di->ntxd == 0)
1794 return NULL;
1795
1796 txp = NULL;
1797
1798 start = di->txin;
1799 if (range == HNDDMA_RANGE_ALL)
1800 end = di->txout;
1801 else {
1802 dma32regs_t *dregs = di->d32txregs;
1803
1804 end =
1805 (u16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
1806 dma32dd_t);
1807
1808 if (range == HNDDMA_RANGE_TRANSFERED) {
1809 active_desc =
1810 (u16) ((R_REG(di->osh, &dregs->status) &
1811 XS_AD_MASK) >> XS_AD_SHIFT);
1812 active_desc = (u16) B2I(active_desc, dma32dd_t);
1813 if (end != active_desc)
1814 end = PREVTXD(active_desc);
1815 }
1816 }
1817
1818 if ((start == 0) && (end > di->txout))
1819 goto bogus;
1820
1821 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1822 dmaaddr_t pa;
1823 hnddma_seg_map_t *map = NULL;
1824 uint size, j, nsegs;
1825
1826 PHYSADDRLOSET(pa,
1827 (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
1828 di->dataoffsetlow));
1829 PHYSADDRHISET(pa, 0);
1830
1831 if (DMASGLIST_ENAB) {
1832 map = &di->txp_dmah[i];
1833 size = map->origsize;
1834 nsegs = map->nsegs;
1835 } else {
1836 size =
1837 (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
1838 CTRL_BC_MASK);
1839 nsegs = 1;
1840 }
1841
1842 for (j = nsegs; j > 0; j--) {
1843 W_SM(&di->txd32[i].addr, 0xdeadbeef);
1844
1845 txp = di->txp[i];
1846 di->txp[i] = NULL;
1847 if (j > 1)
1848 i = NEXTTXD(i);
1849 }
1850
1851 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
1852 }
1853
1854 di->txin = i;
1855
1856 /* tx flow control */
1857 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1858
1859 return txp;
1860
1861 bogus:
1862 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d range %d\n", start, end, di->txout, range));
1863 return NULL;
1864 }
1865
1866 static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
1867 {
1868 uint i, curr;
1869 void *rxp;
1870 dmaaddr_t pa;
1871 /* if forcing, dma engine must be disabled */
1872 ASSERT(!forceall || !dma32_rxenabled(di));
1873
1874 i = di->rxin;
1875
1876 /* return if no packets posted */
1877 if (i == di->rxout)
1878 return NULL;
1879
1880 curr =
1881 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
1882
1883 /* ignore curr if forceall */
1884 if (!forceall && (i == curr))
1885 return NULL;
1886
1887 /* get the packet pointer that corresponds to the rx descriptor */
1888 rxp = di->rxp[i];
1889 ASSERT(rxp);
1890 di->rxp[i] = NULL;
1891
1892 PHYSADDRLOSET(pa,
1893 (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
1894 di->dataoffsetlow));
1895 PHYSADDRHISET(pa, 0);
1896
1897 /* clear this packet from the descriptor ring */
1898 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
1899
1900 W_SM(&di->rxd32[i].addr, 0xdeadbeef);
1901
1902 di->rxin = NEXTRXD(i);
1903
1904 return rxp;
1905 }
1906
1907 /*
1908 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1909 */
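/*
 * Illustrative example (not from the original source): with txin == 2,
 * txout == 5 and the hardware ActiveDescriptor index at 4, rot == TXD(4 - 2)
 * == 2, so each posted descriptor at index "old" is copied to TXD(old + 2)
 * and txin/txout are advanced by the same amount, leaving the still-pending
 * entries aligned with where the hardware will resume.
 */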
1910 static void dma32_txrotate(dma_info_t *di)
1911 {
1912 u16 ad;
1913 uint nactive;
1914 uint rot;
1915 u16 old, new;
1916 u32 w;
1917 u16 first, last;
1918
1919 ASSERT(dma32_txsuspendedidle(di));
1920
1921 nactive = _dma_txactive(di);
1922 ad = (u16) (B2I
1923 (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
1924 >> XS_AD_SHIFT), dma32dd_t));
1925 rot = TXD(ad - di->txin);
1926
1927 ASSERT(rot < di->ntxd);
1928
1929 /* full-ring case is a lot harder - don't worry about this */
1930 if (rot >= (di->ntxd - nactive)) {
1931 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1932 return;
1933 }
1934
1935 first = di->txin;
1936 last = PREVTXD(di->txout);
1937
1938 /* move entries starting at last and moving backwards to first */
1939 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1940 new = TXD(old + rot);
1941
1942 /*
1943 * Move the tx dma descriptor.
1944 * EOT is set only in the last entry in the ring.
1945 */
1946 w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
1947 if (new == (di->ntxd - 1))
1948 w |= CTRL_EOT;
1949 W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
1950 W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
1951
1952 /* zap the old tx dma descriptor address field */
1953 W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));
1954
1955 /* move the corresponding txp[] entry */
1956 ASSERT(di->txp[new] == NULL);
1957 di->txp[new] = di->txp[old];
1958
1959 /* Move the segment map as well */
1960 if (DMASGLIST_ENAB) {
1961 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
1962 sizeof(hnddma_seg_map_t));
1963 bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
1964 }
1965
1966 di->txp[old] = NULL;
1967 }
1968
1969 /* update txin and txout */
1970 di->txin = ad;
1971 di->txout = TXD(di->txout + rot);
1972 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1973
1974 /* kick the chip */
1975 W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
1976 }
1977
1978 /* 64-bit DMA functions */
1979
1980 static void dma64_txinit(dma_info_t *di)
1981 {
1982 u32 control = D64_XC_XE;
1983
1984 DMA_TRACE(("%s: dma_txinit\n", di->name));
1985
1986 if (di->ntxd == 0)
1987 return;
1988
1989 di->txin = di->txout = 0;
1990 di->hnddma.txavail = di->ntxd - 1;
1991
1992 /* clear tx descriptor ring */
1993 BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t)));
1994
1995 /* A DMA engine without an alignment requirement requires the descriptor
1996 * table to be initialized before the engine is enabled
1997 */
1998 if (!di->aligndesc_4k)
1999 _dma_ddtable_init(di, DMA_TX, di->txdpa);
2000
2001 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
2002 control |= D64_XC_PD;
2003 OR_REG(di->osh, &di->d64txregs->control, control);
2004
2005 /* A DMA engine with an alignment requirement has its descriptor table
2006 * initialized after the engine is enabled
2007 */
2008 if (di->aligndesc_4k)
2009 _dma_ddtable_init(di, DMA_TX, di->txdpa);
2010 }
2011
2012 static bool dma64_txenabled(dma_info_t *di)
2013 {
2014 u32 xc;
2015
2016 /* If the chip is dead, it is not enabled :-) */
2017 xc = R_REG(di->osh, &di->d64txregs->control);
2018 return (xc != 0xffffffff) && (xc & D64_XC_XE);
2019 }
2020
2021 static void dma64_txsuspend(dma_info_t *di)
2022 {
2023 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
2024
2025 if (di->ntxd == 0)
2026 return;
2027
2028 OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2029 }
2030
2031 static void dma64_txresume(dma_info_t *di)
2032 {
2033 DMA_TRACE(("%s: dma_txresume\n", di->name));
2034
2035 if (di->ntxd == 0)
2036 return;
2037
2038 AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
2039 }
2040
2041 static bool dma64_txsuspended(dma_info_t *di)
2042 {
2043 return (di->ntxd == 0) ||
2044 ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
2045 D64_XC_SE);
2046 }
2047
2048 static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
2049 {
2050 void *p;
2051
2052 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
2053 (range == HNDDMA_RANGE_ALL) ? "all" :
2054 ((range ==
2055 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2056 "transfered")));
2057
2058 if (di->txin == di->txout)
2059 return;
2060
2061 while ((p = dma64_getnexttxp(di, range))) {
2062 /* For unframed data, we don't have any packets to free */
2063 if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
2064 PKTFREE(di->osh, p, true);
2065 }
2066 }
2067
2068 static bool dma64_txstopped(dma_info_t *di)
2069 {
2070 return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2071 D64_XS0_XS_STOPPED);
2072 }
2073
2074 static bool dma64_rxstopped(dma_info_t *di)
2075 {
2076 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
2077 D64_RS0_RS_STOPPED);
2078 }
2079
2080 static bool dma64_alloc(dma_info_t *di, uint direction)
2081 {
2082 u16 size;
2083 uint ddlen;
2084 void *va;
2085 uint alloced = 0;
2086 u16 align;
2087 u16 align_bits;
2088
2089 ddlen = sizeof(dma64dd_t);
2090
2091 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
2092 align_bits = di->dmadesc_align;
2093 align = (1 << align_bits);
2094
2095 if (direction == DMA_TX) {
2096 va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2097 &alloced, &di->txdpaorig, &di->tx_dmah);
2098 if (va == NULL) {
2099 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
2100 return false;
2101 }
2102 align = (1 << align_bits);
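/*
 * Round the ring start up to the required alignment and apply the same
 * offset (txdalign) to the physical address so the virtual and physical
 * views of the descriptor table stay in sync.
 */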
2103 di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
2104 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
2105 PHYSADDRLOSET(di->txdpa,
2106 PHYSADDRLO(di->txdpaorig) + di->txdalign);
2107 /* Make sure that alignment didn't overflow */
2108 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
2109
2110 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
2111 di->txdalloc = alloced;
2112 ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
2113 } else {
2114 va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2115 &alloced, &di->rxdpaorig, &di->rx_dmah);
2116 if (va == NULL) {
2117 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
2118 return false;
2119 }
2120 align = (1 << align_bits);
2121 di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
2122 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
2123 PHYSADDRLOSET(di->rxdpa,
2124 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
2125 /* Make sure that alignment didn't overflow */
2126 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
2127
2128 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
2129 di->rxdalloc = alloced;
2130 ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
2131 }
2132
2133 return true;
2134 }
2135
2136 static bool dma64_txreset(dma_info_t *di)
2137 {
2138 u32 status;
2139
2140 if (di->ntxd == 0)
2141 return true;
2142
2143 /* suspend tx DMA first */
2144 W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2145 SPINWAIT(((status =
2146 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2147 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
2148 && (status != D64_XS0_XS_STOPPED), 10000);
2149
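/* now disable the engine and wait for it to report the DISABLED state */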
2150 W_REG(di->osh, &di->d64txregs->control, 0);
2151 SPINWAIT(((status =
2152 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2153 != D64_XS0_XS_DISABLED), 10000);
2154
2155 /* wait for the last transaction to complete */
2156 udelay(300);
2157
2158 return status == D64_XS0_XS_DISABLED;
2159 }
2160
2161 static bool dma64_rxidle(dma_info_t *di)
2162 {
2163 DMA_TRACE(("%s: dma_rxidle\n", di->name));
2164
2165 if (di->nrxd == 0)
2166 return true;
2167
2168 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
2169 (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
2170 }
2171
2172 static bool dma64_rxreset(dma_info_t *di)
2173 {
2174 u32 status;
2175
2176 if (di->nrxd == 0)
2177 return true;
2178
2179 W_REG(di->osh, &di->d64rxregs->control, 0);
2180 SPINWAIT(((status =
2181 (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
2182 != D64_RS0_RS_DISABLED), 10000);
2183
2184 return status == D64_RS0_RS_DISABLED;
2185 }
2186
2187 static bool dma64_rxenabled(dma_info_t *di)
2188 {
2189 u32 rc;
2190
2191 rc = R_REG(di->osh, &di->d64rxregs->control);
2192 return (rc != 0xffffffff) && (rc & D64_RC_RE);
2193 }
2194
2195 static bool dma64_txsuspendedidle(dma_info_t *di)
2196 {
2197
2198 if (di->ntxd == 0)
2199 return true;
2200
2201 if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
2202 return false;
2203
2204 if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2205 D64_XS0_XS_IDLE)
2206 return true;
2207
2208 return false;
2209 }
2210
2211 /* Useful when sending unframed data. This allows us to get a progress report from the DMA.
2212 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
2213 * If DMA is idle, we return NULL.
2214 */
2215 static void *dma64_getpos(dma_info_t *di, bool direction)
2216 {
2217 void *va;
2218 bool idle;
2219 u32 cd_offset;
2220
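/*
 * The CD field in status0 is a byte offset into the descriptor table;
 * B2I() converts it to a ring index so the matching txp[]/rxp[] buffer
 * pointer can be returned.
 */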
2221 if (direction == DMA_TX) {
2222 cd_offset =
2223 R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
2224 idle = !NTXDACTIVE(di->txin, di->txout);
2225 va = di->txp[B2I(cd_offset, dma64dd_t)];
2226 } else {
2227 cd_offset =
2228 R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
2229 idle = !NRXDACTIVE(di->rxin, di->rxout);
2230 va = di->rxp[B2I(cd_offset, dma64dd_t)];
2231 }
2232
2233 /* If DMA is IDLE, return NULL */
2234 if (idle) {
2235 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
2236 va = NULL;
2237 }
2238
2239 return va;
2240 }
2241
2242 /* TX of unframed data
2243 *
2244 * Adds a DMA ring descriptor for the data pointed to by "buf".
2245 * This is for DMA of a plain data buffer and is unlike the other hnddma TX
2246 * functions, which take a pointer to a "packet".
2247 * Each call results in a single descriptor being added for "len" bytes of
2248 * data starting at "buf"; it does not handle chained buffers.
2249 */
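/*
 * Hypothetical usage sketch (the surrounding names are illustrative, not
 * part of this driver's public API): post "len" bytes and then poll
 * dma64_getpos() for progress,
 *
 *	if (dma64_txunframed(di, buf, len, true) == 0)
 *		cur = dma64_getpos(di, DMA_TX);
 */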
2250 static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
2251 {
2252 u16 txout;
2253 u32 flags = 0;
2254 dmaaddr_t pa; /* phys addr */
2255
2256 txout = di->txout;
2257
2258 /* return nonzero if out of tx descriptors */
2259 if (NEXTTXD(txout) == di->txin)
2260 goto outoftxd;
2261
2262 if (len == 0)
2263 return 0;
2264
2265 pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);
2266
2267 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
2268
2269 if (txout == (di->ntxd - 1))
2270 flags |= D64_CTRL1_EOT;
2271
2272 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2273 ASSERT(di->txp[txout] == NULL);
2274
2275 /* save the buffer pointer - used by dma_getpos */
2276 di->txp[txout] = buf;
2277
2278 txout = NEXTTXD(txout);
2279 /* bump the tx descriptor index */
2280 di->txout = txout;
2281
2282 /* kick the chip */
2283 if (commit) {
2284 W_REG(di->osh, &di->d64txregs->ptr,
2285 di->xmtptrbase + I2B(txout, dma64dd_t));
2286 }
2287
2288 /* tx flow control */
2289 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2290
2291 return 0;
2292
2293 outoftxd:
2294 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
2295 di->hnddma.txavail = 0;
2296 di->hnddma.txnobuf++;
2297 return -1;
2298 }
2299
2300 /* !! tx entry routine
2301 * WARNING: the caller must check the return value for errors.
2302 * An error (tossed frame) could be fatal and cause many subsequent hard-to-debug problems.
2303 */
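/*
 * Illustrative caller pattern (not taken from this file): on error the frame
 * has already been freed by dma64_txfast(), so the caller must only account
 * for the drop and must not retry with the same packet, e.g.
 *
 *	if (dma64_txfast(di, skb, true) != 0)
 *		drops++;
 */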
2304 static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
2305 bool commit)
2306 {
2307 struct sk_buff *p, *next;
2308 unsigned char *data;
2309 uint len;
2310 u16 txout;
2311 u32 flags = 0;
2312 dmaaddr_t pa;
2313
2314 DMA_TRACE(("%s: dma_txfast\n", di->name));
2315
2316 txout = di->txout;
2317
2318 /*
2319 * Walk the chain of packet buffers
2320 * allocating and initializing transmit descriptor entries.
2321 */
2322 for (p = p0; p; p = next) {
2323 uint nsegs, j;
2324 hnddma_seg_map_t *map;
2325
2326 data = p->data;
2327 len = p->len;
2328 #ifdef BCM_DMAPAD
2329 len += PKTDMAPAD(di->osh, p);
2330 #endif /* BCM_DMAPAD */
2331 next = p->next;
2332
2333 /* return nonzero if out of tx descriptors */
2334 if (NEXTTXD(txout) == di->txin)
2335 goto outoftxd;
2336
2337 if (len == 0)
2338 continue;
2339
2340 /* get physical address of buffer start */
2341 if (DMASGLIST_ENAB)
2342 bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
2343
2344 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
2345 &di->txp_dmah[txout]);
2346
2347 if (DMASGLIST_ENAB) {
2348 map = &di->txp_dmah[txout];
2349
2350 /* See if all the segments can be accounted for */
2351 if (map->nsegs >
2352 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
2353 1))
2354 goto outoftxd;
2355
2356 nsegs = map->nsegs;
2357 } else
2358 nsegs = 1;
2359
2360 for (j = 1; j <= nsegs; j++) {
2361 flags = 0;
2362 if (p == p0 && j == 1)
2363 flags |= D64_CTRL1_SOF;
2364
2365 /* With a DMA segment list, the descriptor table is filled
2366 * from the segment list instead of by looping over the
2367 * buffers of a multi-buffer chain. Therefore, for SGLIST,
2368 * EOF is set when the end of the segment list is reached.
2369 */
2370 if ((!DMASGLIST_ENAB && next == NULL) ||
2371 (DMASGLIST_ENAB && j == nsegs))
2372 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
2373 if (txout == (di->ntxd - 1))
2374 flags |= D64_CTRL1_EOT;
2375
2376 if (DMASGLIST_ENAB) {
2377 len = map->segs[j - 1].length;
2378 pa = map->segs[j - 1].addr;
2379 }
2380 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2381 ASSERT(di->txp[txout] == NULL);
2382
2383 txout = NEXTTXD(txout);
2384 }
2385
2386 /* See above. No need to loop over individual buffers */
2387 if (DMASGLIST_ENAB)
2388 break;
2389 }
2390
2391 /* if last txd eof not set, fix it */
2392 if (!(flags & D64_CTRL1_EOF))
2393 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
2394 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
2395
2396 /* save the packet */
2397 di->txp[PREVTXD(txout)] = p0;
2398
2399 /* bump the tx descriptor index */
2400 di->txout = txout;
2401
2402 /* kick the chip */
2403 if (commit)
2404 W_REG(di->osh, &di->d64txregs->ptr,
2405 di->xmtptrbase + I2B(txout, dma64dd_t));
2406
2407 /* tx flow control */
2408 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2409
2410 return 0;
2411
2412 outoftxd:
2413 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
2414 PKTFREE(di->osh, p0, true);
2415 di->hnddma.txavail = 0;
2416 di->hnddma.txnobuf++;
2417 return -1;
2418 }
2419
2420 /*
2421 * Reclaim next completed txd (txds if using chained buffers) in the range
2422 * specified and return associated packet.
2423 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
2424 * transmitted as noted by the hardware "CurrDescr" pointer.
2425 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
2426 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
2427 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
2428 * return associated packet regardless of the value of hardware pointers.
2429 */
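/*
 * Note: on the 64-bit engine the CurrDescr/ActiveDescr values reported in
 * status0/status1 are byte offsets relative to the descriptor table base
 * programmed into the chip, which is why xmtptrbase is subtracted (and the
 * result masked) before B2I() converts the offset into a ring index.
 */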
2430 static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
2431 {
2432 u16 start, end, i;
2433 u16 active_desc;
2434 void *txp;
2435
2436 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
2437 (range == HNDDMA_RANGE_ALL) ? "all" :
2438 ((range ==
2439 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2440 "transfered")));
2441
2442 if (di->ntxd == 0)
2443 return NULL;
2444
2445 txp = NULL;
2446
2447 start = di->txin;
2448 if (range == HNDDMA_RANGE_ALL)
2449 end = di->txout;
2450 else {
2451 dma64regs_t *dregs = di->d64txregs;
2452
2453 end =
2454 (u16) (B2I
2455 (((R_REG(di->osh, &dregs->status0) &
2456 D64_XS0_CD_MASK) -
2457 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
2458
2459 if (range == HNDDMA_RANGE_TRANSFERED) {
2460 active_desc =
2461 (u16) (R_REG(di->osh, &dregs->status1) &
2462 D64_XS1_AD_MASK);
2463 active_desc =
2464 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
2465 active_desc = B2I(active_desc, dma64dd_t);
2466 if (end != active_desc)
2467 end = PREVTXD(active_desc);
2468 }
2469 }
2470
2471 if ((start == 0) && (end > di->txout))
2472 goto bogus;
2473
2474 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
2475 dmaaddr_t pa;
2476 hnddma_seg_map_t *map = NULL;
2477 uint size, j, nsegs;
2478
2479 PHYSADDRLOSET(pa,
2480 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
2481 di->dataoffsetlow));
2482 PHYSADDRHISET(pa,
2483 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
2484 di->dataoffsethigh));
2485
2486 if (DMASGLIST_ENAB) {
2487 map = &di->txp_dmah[i];
2488 size = map->origsize;
2489 nsegs = map->nsegs;
2490 } else {
2491 size =
2492 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
2493 D64_CTRL2_BC_MASK);
2494 nsegs = 1;
2495 }
2496
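/*
 * A packet mapped as several segments (or posted as a buffer chain)
 * occupies nsegs consecutive descriptors: zap the address fields of each
 * one and pick up the packet pointer, which is stored only with the last
 * descriptor used for the packet.
 */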
2497 for (j = nsegs; j > 0; j--) {
2498 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
2499 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
2500
2501 txp = di->txp[i];
2502 di->txp[i] = NULL;
2503 if (j > 1)
2504 i = NEXTTXD(i);
2505 }
2506
2507 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
2508 }
2509
2510 di->txin = i;
2511
2512 /* tx flow control */
2513 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2514
2515 return txp;
2516
2517 bogus:
2518 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d range %d\n", start, end, di->txout, range));
2519 return NULL;
2520 }
2521
2522 static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
2523 {
2524 uint i, curr;
2525 void *rxp;
2526 dmaaddr_t pa;
2527
2528 /* if forcing, dma engine must be disabled */
2529 ASSERT(!forceall || !dma64_rxenabled(di));
2530
2531 i = di->rxin;
2532
2533 /* return if no packets posted */
2534 if (i == di->rxout)
2535 return NULL;
2536
2537 curr =
2538 B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
2539 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
2540
2541 /* ignore curr if forceall */
2542 if (!forceall && (i == curr))
2543 return NULL;
2544
2545 /* get the packet pointer that corresponds to the rx descriptor */
2546 rxp = di->rxp[i];
2547 ASSERT(rxp);
2548 di->rxp[i] = NULL;
2549
2550 PHYSADDRLOSET(pa,
2551 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
2552 di->dataoffsetlow));
2553 PHYSADDRHISET(pa,
2554 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
2555 di->dataoffsethigh));
2556
2557 /* clear this packet from the descriptor ring */
2558 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
2559
2560 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
2561 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
2562
2563 di->rxin = NEXTRXD(i);
2564
2565 return rxp;
2566 }
2567
2568 static bool _dma64_addrext(struct osl_info *osh, dma64regs_t * dma64regs)
2569 {
2570 u32 w;
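/*
 * Probe for address-extension support: set the AE bit, read it back, then
 * clear it again; if the bit sticks, the engine implements extended
 * addressing.
 */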
2571 OR_REG(osh, &dma64regs->control, D64_XC_AE);
2572 w = R_REG(osh, &dma64regs->control);
2573 AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
2574 return (w & D64_XC_AE) == D64_XC_AE;
2575 }
2576
2577 /*
2578 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
2579 */
2580 static void dma64_txrotate(dma_info_t *di)
2581 {
2582 u16 ad;
2583 uint nactive;
2584 uint rot;
2585 u16 old, new;
2586 u32 w;
2587 u16 first, last;
2588
2589 ASSERT(dma64_txsuspendedidle(di));
2590
2591 nactive = _dma_txactive(di);
2592 ad = (u16) (B2I
2593 ((((R_REG(di->osh, &di->d64txregs->status1) &
2594 D64_XS1_AD_MASK)
2595 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
2596 rot = TXD(ad - di->txin);
2597
2598 ASSERT(rot < di->ntxd);
2599
2600 /* full-ring case is a lot harder - don't worry about this */
2601 if (rot >= (di->ntxd - nactive)) {
2602 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
2603 return;
2604 }
2605
2606 first = di->txin;
2607 last = PREVTXD(di->txout);
2608
2609 /* move entries starting at last and moving backwards to first */
2610 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
2611 new = TXD(old + rot);
2612
2613 /*
2614 * Move the tx dma descriptor.
2615 * EOT is set only in the last entry in the ring.
2616 */
2617 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
2618 if (new == (di->ntxd - 1))
2619 w |= D64_CTRL1_EOT;
2620 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
2621
2622 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
2623 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
2624
2625 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
2626 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
2627
2628 /* zap the old tx dma descriptor address field */
2629 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
2630 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
2631
2632 /* move the corresponding txp[] entry */
2633 ASSERT(di->txp[new] == NULL);
2634 di->txp[new] = di->txp[old];
2635
2636 /* Move the map */
2637 if (DMASGLIST_ENAB) {
2638 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
2639 sizeof(hnddma_seg_map_t));
2640 bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
2641 }
2642
2643 di->txp[old] = NULL;
2644 }
2645
2646 /* update txin and txout */
2647 di->txin = ad;
2648 di->txout = TXD(di->txout + rot);
2649 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2650
2651 /* kick the chip */
2652 W_REG(di->osh, &di->d64txregs->ptr,
2653 di->xmtptrbase + I2B(di->txout, dma64dd_t));
2654 }
2655
2656 uint dma_addrwidth(si_t *sih, void *dmaregs)
2657 {
2658 dma32regs_t *dma32regs;
2659 struct osl_info *osh;
2660
2661 osh = si_osh(sih);
2662
2663 /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
2664 /* DMA engine is 64-bit capable */
2665 if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
2666 /* backplane is 64-bit capable */
2667 if (si_backplane64(sih))
2668 /* If bus is System Backplane or PCIE then we can access 64-bits */
2669 if ((sih->bustype == SI_BUS) ||
2670 ((sih->bustype == PCI_BUS) &&
2671 (sih->buscoretype == PCIE_CORE_ID)))
2672 return DMADDRWIDTH_64;
2673
2674 /* DMA64 is always 32-bit capable, AE is always true */
2675 ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));
2676
2677 return DMADDRWIDTH_32;
2678 }
2679
2680 /* Start checking for 32-bit / 30-bit addressing */
2681 dma32regs = (dma32regs_t *) dmaregs;
2682
2683 /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
2684 if ((sih->bustype == SI_BUS) ||
2685 ((sih->bustype == PCI_BUS)
2686 && sih->buscoretype == PCIE_CORE_ID)
2687 || (_dma32_addrext(osh, dma32regs)))
2688 return DMADDRWIDTH_32;
2689
2690 /* Fallthru */
2691 return DMADDRWIDTH_30;
2692 }