1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/delay.h>
19 #include <linux/pci.h>
20
21 #include <brcmu_utils.h>
22 #include <aiutils.h>
23 #include "types.h"
24 #include "dma.h"
25
26 /*
27 * Each descriptor ring must be 8 kB aligned and fit within a
28 * contiguous 8 kB region of physical address space.
29 */
30 #define D64RINGALIGN_BITS 13
31 #define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
32 #define D64RINGALIGN (1 << D64RINGALIGN_BITS)
33
34 #define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
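/*
 * With 16-byte descriptors (struct dma64desc is four u32 fields), D64MAXDD
 * works out to 8192 / 16 = 512 descriptors per ring.
 */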
35
36 /* transmit channel control */
37 #define D64_XC_XE 0x00000001 /* transmit enable */
38 #define D64_XC_SE 0x00000002 /* transmit suspend request */
39 #define D64_XC_LE 0x00000004 /* loopback enable */
40 #define D64_XC_FL 0x00000010 /* flush request */
41 #define D64_XC_PD 0x00000800 /* parity check disable */
42 #define D64_XC_AE 0x00030000 /* address extension bits */
43 #define D64_XC_AE_SHIFT 16
44
45 /* transmit descriptor table pointer */
46 #define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
47
48 /* transmit channel status */
49 #define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
50 #define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
51 #define D64_XS0_XS_SHIFT 28
52 #define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
53 #define D64_XS0_XS_ACTIVE 0x10000000 /* active */
54 #define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
55 #define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
56 #define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
57
58 #define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
59 #define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
60 #define D64_XS1_XE_SHIFT 28
61 #define D64_XS1_XE_NOERR 0x00000000 /* no error */
62 #define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
63 #define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
64 #define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
65 #define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
66 #define D64_XS1_XE_COREE 0x50000000 /* core error */
67
68 /* receive channel control */
69 /* receive enable */
70 #define D64_RC_RE 0x00000001
71 /* receive frame offset */
72 #define D64_RC_RO_MASK 0x000000fe
73 #define D64_RC_RO_SHIFT 1
74 /* direct fifo receive (pio) mode */
75 #define D64_RC_FM 0x00000100
76 /* separate rx header descriptor enable */
77 #define D64_RC_SH 0x00000200
78 /* overflow continue */
79 #define D64_RC_OC 0x00000400
80 /* parity check disable */
81 #define D64_RC_PD 0x00000800
82 /* address extension bits */
83 #define D64_RC_AE 0x00030000
84 #define D64_RC_AE_SHIFT 16
85
86 /* flags for dma controller */
87 /* parity enable */
88 #define DMA_CTRL_PEN (1 << 0)
89 /* rx overflow continue */
90 #define DMA_CTRL_ROC (1 << 1)
91 /* allow rx scatter to multiple descriptors */
92 #define DMA_CTRL_RXMULTI (1 << 2)
93 /* Unframed Rx/Tx data */
94 #define DMA_CTRL_UNFRAMED (1 << 3)
95
96 /* receive descriptor table pointer */
97 #define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
98
99 /* receive channel status */
100 #define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
101 #define D64_RS0_RS_MASK 0xf0000000 /* receive state */
102 #define D64_RS0_RS_SHIFT 28
103 #define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
104 #define D64_RS0_RS_ACTIVE 0x10000000 /* active */
105 #define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
106 #define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
107 #define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
108
109 #define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
110 #define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
111 #define D64_RS1_RE_SHIFT 28
112 #define D64_RS1_RE_NOERR 0x00000000 /* no error */
113 #define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
114 #define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
115 #define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
116 #define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
117 #define D64_RS1_RE_COREE 0x50000000 /* core error */
118
119 /* fifoaddr */
120 #define D64_FA_OFF_MASK 0xffff /* offset */
121 #define D64_FA_SEL_MASK 0xf0000 /* select */
122 #define D64_FA_SEL_SHIFT 16
123 #define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
124 #define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
125 #define D64_FA_SEL_RDD 0x40000 /* receive dma data */
126 #define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
127 #define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
128 #define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
129 #define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
130 #define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
131 #define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
132 #define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
133
134 /* descriptor control flags 1 */
135 #define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
136 #define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
137 #define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
138 #define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
139 #define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
140
141 /* descriptor control flags 2 */
142 /* buffer byte count; real data length must be <= 16 KB */
143 #define D64_CTRL2_BC_MASK 0x00007fff
144 /* address extension bits */
145 #define D64_CTRL2_AE 0x00030000
146 #define D64_CTRL2_AE_SHIFT 16
147 /* parity bit */
148 #define D64_CTRL2_PARITY 0x00040000
149
150 /* control flags in the range [27:20] are core-specific and not defined here */
151 #define D64_CTRL_CORE_MASK 0x0ff00000
152
153 #define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
154 #define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
155 #define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
156 #define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
157
158 #define DMA64_DD_PARITY(dd) \
159 parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
160
161 /*
162 * packet headroom necessary to accommodate the largest header
163 * in the system (i.e. TXOFF). By doing so, we avoid the need to
164 * allocate an extra buffer for the header when bridging to WL.
165 * There is a compile-time check in wlc.c which ensures that this
166 * value is at least as big as TXOFF. This value is used in
167 * dma_rxfill().
168 */
169
170 #define BCMEXTRAHDROOM 172
171
172 /* debug/trace */
173 #ifdef BCMDBG
174 #define DMA_ERROR(args) \
175 do { \
176 if (!(*di->msg_level & 1)) \
177 ; \
178 else \
179 printk args; \
180 } while (0)
181 #define DMA_TRACE(args) \
182 do { \
183 if (!(*di->msg_level & 2)) \
184 ; \
185 else \
186 printk args; \
187 } while (0)
188 #else
189 #define DMA_ERROR(args)
190 #define DMA_TRACE(args)
191 #endif /* BCMDBG */
192
193 #define DMA_NONE(args)
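/*
 * These macros take a parenthesized printk argument list, e.g.
 * DMA_ERROR(("%s: out of rxbufs\n", di->name)); the extra parentheses let a
 * single macro parameter carry a variable-length argument list.
 */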
194
195 #define d64txregs dregs.d64_u.txregs_64
196 #define d64rxregs dregs.d64_u.rxregs_64
197 #define txd64 dregs.d64_u.txd_64
198 #define rxd64 dregs.d64_u.rxd_64
199
200 #define MAXNAMEL 8 /* 8 char names */
201
202 #define DI_INFO(dmah) ((struct dma_info *)dmah)
203
204 /* descriptor bumping macros */
205 /* faster than %, but n must be power of 2 */
206 #define XXD(x, n) ((x) & ((n) - 1))
207
208 #define TXD(x) XXD((x), di->ntxd)
209 #define RXD(x) XXD((x), di->nrxd)
210 #define NEXTTXD(i) TXD((i) + 1)
211 #define PREVTXD(i) TXD((i) - 1)
212 #define NEXTRXD(i) RXD((i) + 1)
213 #define PREVRXD(i) RXD((i) - 1)
214
215 #define NTXDACTIVE(h, t) TXD((t) - (h))
216 #define NRXDACTIVE(h, t) RXD((t) - (h))
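/*
 * Example: with ntxd = 64, txin = 60 and txout = 3, NTXDACTIVE(60, 3) =
 * (3 - 60) & 63 = 7, i.e. descriptors 60..63 and 0..2 are outstanding.
 */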
217
218 /* macros to convert between byte offsets and indexes */
219 #define B2I(bytes, type) ((bytes) / sizeof(type))
220 #define I2B(index, type) ((index) * sizeof(type))
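/*
 * e.g. I2B(5, struct dma64desc) = 5 * 16 = 80 bytes, and
 * B2I(80, struct dma64desc) = 5.
 */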
221
222 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
223 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
224
225 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
226 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
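/*
 * Example of 32-bit address extension (see dma64_dd_upd() and
 * _dma_ddtable_init()): for a hypothetical pa of 0xc0001000,
 * ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT = 0x3 is written to the
 * AE field and the remaining bits, 0x00001000, go into addrlow.
 */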
227
228 /*
229 * DMA Descriptor
230 * Descriptors are only read by the hardware, never written back.
231 */
232 struct dma64desc {
233 u32 ctrl1; /* misc control bits & bufcount */
234 u32 ctrl2; /* buffer count and address extension */
235 u32 addrlow; /* memory address of the data buffer, bits 31:0 */
236 u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
237 };
238
239 /* dma engine software state */
240 struct dma_info {
241 struct dma_pub dma; /* exported structure */
242 uint *msg_level; /* message level pointer */
243 char name[MAXNAMEL]; /* callers name for diag msgs */
244
245 struct pci_dev *pbus; /* bus handle */
246
247 bool dma64; /* this dma engine is operating in 64-bit mode */
248 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
249
250 union {
251 struct {
252 /* 64-bit dma tx engine registers */
253 struct dma64regs *txregs_64;
254 /* 64-bit dma rx engine registers */
255 struct dma64regs *rxregs_64;
256 /* pointer to dma64 tx descriptor ring */
257 struct dma64desc *txd_64;
258 /* pointer to dma64 rx descriptor ring */
259 struct dma64desc *rxd_64;
260 } d64_u;
261 } dregs;
262
263 u16 dmadesc_align; /* alignment requirement for dma descriptors */
264
265 u16 ntxd; /* # tx descriptors tunable */
266 u16 txin; /* index of next descriptor to reclaim */
267 u16 txout; /* index of next descriptor to post */
268 /* pointer to parallel array of pointers to packets */
269 struct sk_buff **txp;
270 /* Aligned physical address of descriptor ring */
271 dma_addr_t txdpa;
272 /* Original physical address of descriptor ring */
273 dma_addr_t txdpaorig;
274 u16 txdalign; /* #bytes added to alloc'd mem to align txd */
275 u32 txdalloc; /* #bytes allocated for the ring */
276 u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
277 * is not just an index, it needs all 13 bits to be
278 * an offset from the addr register.
279 */
280
281 u16 nrxd; /* # rx descriptors tunable */
282 u16 rxin; /* index of next descriptor to reclaim */
283 u16 rxout; /* index of next descriptor to post */
284 /* pointer to parallel array of pointers to packets */
285 struct sk_buff **rxp;
286 /* Aligned physical address of descriptor ring */
287 dma_addr_t rxdpa;
288 /* Original physical address of descriptor ring */
289 dma_addr_t rxdpaorig;
290 u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
291 u32 rxdalloc; /* #bytes allocated for the ring */
292 u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
293
294 /* tunables */
295 unsigned int rxbufsize; /* rx buffer size in bytes, not including
296 * the extra headroom
297 */
298 uint rxextrahdrroom; /* extra rx headroom, reserved to assist the upper
299 * stack, e.g. some rx pkt buffers will be
300 * bridged to the tx side without byte copying.
301 * The extra headroom needs to be large enough
302 * to fit txheader needs. Some dongle drivers may
303 * not need it.
304 */
305 uint nrxpost; /* # rx buffers to keep posted */
306 unsigned int rxoffset; /* rxcontrol offset */
307 /* add to get dma address of descriptor ring, low 32 bits */
308 uint ddoffsetlow;
309 /* high 32 bits */
310 uint ddoffsethigh;
311 /* add to get dma address of data buffer, low 32 bits */
312 uint dataoffsetlow;
313 /* high 32 bits */
314 uint dataoffsethigh;
315 /* descriptor base need to be aligned or not */
316 bool aligndesc_4k;
317 };
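/*
 * Ring-index convention: txin == txout (rxin == rxout) means the ring is
 * empty; dma_txfast() refuses to post a descriptor when NEXTTXD(txout) ==
 * txin, so at most ntxd - 1 descriptors are ever outstanding, which is why
 * dma.txavail is computed as ntxd - NTXDACTIVE(txin, txout) - 1.
 */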
318
319 /*
320 * default dma message level (if input msg_level
321 * pointer is null in dma_attach())
322 */
323 static uint dma_msg_level;
324
325 static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
326 {
327 uint dmactrlflags;
328
329 if (di == NULL) {
330 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
331 return 0;
332 }
333
334 dmactrlflags = di->dma.dmactrlflags & ~mask;
335 dmactrlflags |= flags;
336
337 /* If trying to enable parity, check if parity is actually supported */
338 if (dmactrlflags & DMA_CTRL_PEN) {
339 u32 control;
340
341 control = R_REG(&di->d64txregs->control);
342 W_REG(&di->d64txregs->control,
343 control | D64_XC_PD);
344 if (R_REG(&di->d64txregs->control) & D64_XC_PD)
345 /* We *can* disable it so it is supported,
346 * restore control register
347 */
348 W_REG(&di->d64txregs->control,
349 control);
350 else
351 /* Not supported, don't allow it to be enabled */
352 dmactrlflags &= ~DMA_CTRL_PEN;
353 }
354
355 di->dma.dmactrlflags = dmactrlflags;
356
357 return dmactrlflags;
358 }
359
360 static bool _dma64_addrext(struct dma64regs *dma64regs)
361 {
362 u32 w;
363 OR_REG(&dma64regs->control, D64_XC_AE);
364 w = R_REG(&dma64regs->control);
365 AND_REG(&dma64regs->control, ~D64_XC_AE);
366 return (w & D64_XC_AE) == D64_XC_AE;
367 }
368
369 /*
370 * return true if this dma engine supports DmaExtendedAddrChanges,
371 * otherwise false
372 */
373 static bool _dma_isaddrext(struct dma_info *di)
374 {
375 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
376
377 /* not all tx or rx channels are available */
378 if (di->d64txregs != NULL) {
379 if (!_dma64_addrext(di->d64txregs))
380 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
381 "AE set\n", di->name));
382 return true;
383 } else if (di->d64rxregs != NULL) {
384 if (!_dma64_addrext(di->d64rxregs))
385 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
386 "AE set\n", di->name));
387 return true;
388 }
389
390 return false;
391 }
392
393 static bool _dma_descriptor_align(struct dma_info *di)
394 {
395 u32 addrl;
396
397 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
398 if (di->d64txregs != NULL) {
399 W_REG(&di->d64txregs->addrlow, 0xff0);
400 addrl = R_REG(&di->d64txregs->addrlow);
401 if (addrl != 0)
402 return false;
403 } else if (di->d64rxregs != NULL) {
404 W_REG(&di->d64rxregs->addrlow, 0xff0);
405 addrl = R_REG(&di->d64rxregs->addrlow);
406 if (addrl != 0)
407 return false;
408 }
409 return true;
410 }
411
412 static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
413 u16 align_bits, uint *alloced,
414 dma_addr_t *pap)
415 {
416 if (align_bits) {
417 u16 align = (1 << align_bits);
418 if (!IS_ALIGNED(PAGE_SIZE, align))
419 size += align;
420 *alloced = size;
421 }
422 return pci_alloc_consistent(pdev, size, pap);
423 }
424
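/*
 * e.g. dma_align_sizetobits(8192) returns 13; for a non-power-of-2 size the
 * result is the position of the highest set bit, i.e. floor(log2(size)).
 */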
425 static
426 u8 dma_align_sizetobits(uint size)
427 {
428 u8 bitpos = 0;
429 while (size >>= 1)
430 bitpos++;
431 return bitpos;
432 }
433
434 /* This function ensures that the DMA descriptor ring does not get allocated
435 * across a page boundary. If the first allocation does cross a page boundary,
436 * it is freed and the allocation is redone at a location aligned to the
437 * descriptor ring size, which guarantees that the ring will not cross a
438 * page boundary.
439 */
440 static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
441 u16 *alignbits, uint *alloced,
442 dma_addr_t *descpa)
443 {
444 void *va;
445 u32 desc_strtaddr;
446 u32 alignbytes = 1 << *alignbits;
447
448 va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
449
450 if (NULL == va)
451 return NULL;
452
453 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
454 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
455 & boundary)) {
456 *alignbits = dma_align_sizetobits(size);
457 pci_free_consistent(di->pbus, size, va, *descpa);
458 va = dma_alloc_consistent(di->pbus, size, *alignbits,
459 alloced, descpa);
460 }
461 return va;
462 }
463
464 static bool dma64_alloc(struct dma_info *di, uint direction)
465 {
466 u16 size;
467 uint ddlen;
468 void *va;
469 uint alloced = 0;
470 u16 align;
471 u16 align_bits;
472
473 ddlen = sizeof(struct dma64desc);
474
475 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
476 align_bits = di->dmadesc_align;
477 align = (1 << align_bits);
478
479 if (direction == DMA_TX) {
480 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
481 &alloced, &di->txdpaorig);
482 if (va == NULL) {
483 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
484 " failed\n", di->name));
485 return false;
486 }
487 align = (1 << align_bits);
488 di->txd64 = (struct dma64desc *)
489 roundup((unsigned long)va, align);
490 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
491 di->txdpa = di->txdpaorig + di->txdalign;
492 di->txdalloc = alloced;
493 } else {
494 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
495 &alloced, &di->rxdpaorig);
496 if (va == NULL) {
497 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
498 " failed\n", di->name));
499 return false;
500 }
501 align = (1 << align_bits);
502 di->rxd64 = (struct dma64desc *)
503 roundup((unsigned long)va, align);
504 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
505 di->rxdpa = di->rxdpaorig + di->rxdalign;
506 di->rxdalloc = alloced;
507 }
508
509 return true;
510 }
511
512 static bool _dma_alloc(struct dma_info *di, uint direction)
513 {
514 return dma64_alloc(di, direction);
515 }
516
517 struct dma_pub *dma_attach(char *name, struct si_pub *sih,
518 void *dmaregstx, void *dmaregsrx, uint ntxd,
519 uint nrxd, uint rxbufsize, int rxextheadroom,
520 uint nrxpost, uint rxoffset, uint *msg_level)
521 {
522 struct dma_info *di;
523 uint size;
524
525 /* allocate private info structure */
526 di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
527 if (di == NULL) {
528 #ifdef BCMDBG
529 printk(KERN_ERR "dma_attach: out of memory\n");
530 #endif
531 return NULL;
532 }
533
534 di->msg_level = msg_level ? msg_level : &dma_msg_level;
535
536
537 di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
538
539 /* init dma reg pointer */
540 di->d64txregs = (struct dma64regs *) dmaregstx;
541 di->d64rxregs = (struct dma64regs *) dmaregsrx;
542
543 /*
544 * Default flags (which can be changed by the driver calling
545 * dma_ctrlflags before enable): For backwards compatibility
546 * both Rx Overflow Continue and Parity are DISABLED.
547 */
548 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
549
550 DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
551 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
552 "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
553 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
554 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
555
556 /* make a private copy of our callers name */
557 strncpy(di->name, name, MAXNAMEL);
558 di->name[MAXNAMEL - 1] = '\0';
559
560 di->pbus = ((struct si_info *)sih)->pbus;
561
562 /* save tunables */
563 di->ntxd = (u16) ntxd;
564 di->nrxd = (u16) nrxd;
565
566 /* the actual dma size doesn't include the extra headroom */
567 di->rxextrahdrroom =
568 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
569 if (rxbufsize > BCMEXTRAHDROOM)
570 di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
571 else
572 di->rxbufsize = (u16) rxbufsize;
573
574 di->nrxpost = (u16) nrxpost;
575 di->rxoffset = (u8) rxoffset;
576
577 /*
578 * figure out the DMA physical address offset for dd and data
579 * PCI/PCIE: they map the silicon backplane address to zero-
580 * based memory, so an offset is needed.
581 * Other buses: use zero. SI_BUS BIGENDIAN kludge: use the sdram
582 * swapped region for data buffers, not descriptors.
583 */
584 di->ddoffsetlow = 0;
585 di->dataoffsetlow = 0;
586 /* add offset for pcie with DMA64 bus */
587 di->ddoffsetlow = 0;
588 di->ddoffsethigh = SI_PCIE_DMA_H32;
589 di->dataoffsetlow = di->ddoffsetlow;
590 di->dataoffsethigh = di->ddoffsethigh;
591 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
592 if ((ai_coreid(sih) == SDIOD_CORE_ID)
593 && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
594 di->addrext = 0;
595 else if ((ai_coreid(sih) == I2S_CORE_ID) &&
596 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
597 di->addrext = 0;
598 else
599 di->addrext = _dma_isaddrext(di);
600
601 /* do the descriptors need to be aligned, and if so, on 4K or 8K boundaries? */
602 di->aligndesc_4k = _dma_descriptor_align(di);
603 if (di->aligndesc_4k) {
604 di->dmadesc_align = D64RINGALIGN_BITS;
605 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
606 /* for a smaller dd table, HW relaxes the alignment requirement */
607 di->dmadesc_align = D64RINGALIGN_BITS - 1;
608 } else {
609 di->dmadesc_align = 4; /* 16 byte alignment */
610 }
611
612 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
613 di->aligndesc_4k, di->dmadesc_align));
614
615 /* allocate tx packet pointer vector */
616 if (ntxd) {
617 size = ntxd * sizeof(void *);
618 di->txp = kzalloc(size, GFP_ATOMIC);
619 if (di->txp == NULL) {
620 DMA_ERROR(("%s: dma_attach: out of tx memory\n",
621 di->name));
622 goto fail;
623 }
624 }
625
626 /* allocate rx packet pointer vector */
627 if (nrxd) {
628 size = nrxd * sizeof(void *);
629 di->rxp = kzalloc(size, GFP_ATOMIC);
630 if (di->rxp == NULL) {
631 DMA_ERROR(("%s: dma_attach: out of rx memory\n",
632 di->name));
633 goto fail;
634 }
635 }
636
637 /*
638 * allocate transmit descriptor ring, only need ntxd descriptors
639 * but it must be aligned
640 */
641 if (ntxd) {
642 if (!_dma_alloc(di, DMA_TX))
643 goto fail;
644 }
645
646 /*
647 * allocate receive descriptor ring, only need nrxd descriptors
648 * but it must be aligned
649 */
650 if (nrxd) {
651 if (!_dma_alloc(di, DMA_RX))
652 goto fail;
653 }
654
655 if ((di->ddoffsetlow != 0) && !di->addrext) {
656 if (di->txdpa > SI_PCI_DMA_SZ) {
657 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
658 "supported\n", di->name, (u32)di->txdpa));
659 goto fail;
660 }
661 if (di->rxdpa > SI_PCI_DMA_SZ) {
662 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
663 "supported\n", di->name, (u32)di->rxdpa));
664 goto fail;
665 }
666 }
667
668 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
669 "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
670 di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
671 di->addrext));
672
673 return (struct dma_pub *) di;
674
675 fail:
676 dma_detach((struct dma_pub *)di);
677 return NULL;
678 }
679
680 /* Check for odd number of 1's */
681 static inline u32 parity32(u32 data)
682 {
683 data ^= data >> 16;
684 data ^= data >> 8;
685 data ^= data >> 4;
686 data ^= data >> 2;
687 data ^= data >> 1;
688
689 return data & 1;
690 }
691
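/*
 * e.g. parity32(0xb) = 1 (three bits set). dma64_dd_upd() below uses
 * DMA64_DD_PARITY() on the four descriptor words and, when the result is odd,
 * sets D64_CTRL2_PARITY so the descriptor as a whole carries even parity.
 */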
692 static inline void
693 dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
694 dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
695 {
696 u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
697
698 /* PCI bus with a big (>1 GB) physical address: use address extension */
699 if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
700 ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
701 ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
702 ddring[outidx].ctrl1 = cpu_to_le32(*flags);
703 ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
704 } else {
705 /* address extension for 32-bit PCI */
706 u32 ae;
707
708 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
709 pa &= ~PCI32ADDR_HIGH;
710
711 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
712 ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
713 ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
714 ddring[outidx].ctrl1 = cpu_to_le32(*flags);
715 ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
716 }
717 if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
718 if (DMA64_DD_PARITY(&ddring[outidx]))
719 ddring[outidx].ctrl2 =
720 cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
721 }
722 }
723
724 /* !! may be called with core in reset */
725 void dma_detach(struct dma_pub *pub)
726 {
727 struct dma_info *di = (struct dma_info *)pub;
728
729 DMA_TRACE(("%s: dma_detach\n", di->name));
730
731 /* free dma descriptor rings */
732 if (di->txd64)
733 pci_free_consistent(di->pbus, di->txdalloc,
734 ((s8 *)di->txd64 - di->txdalign),
735 (di->txdpaorig));
736 if (di->rxd64)
737 pci_free_consistent(di->pbus, di->rxdalloc,
738 ((s8 *)di->rxd64 - di->rxdalign),
739 (di->rxdpaorig));
740
741 /* free packet pointer vectors */
742 kfree(di->txp);
743 kfree(di->rxp);
744
745 /* free our private info structure */
746 kfree(di);
747
748 }
749
750 /* initialize descriptor table base address */
751 static void
752 _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
753 {
754 if (!di->aligndesc_4k) {
755 if (direction == DMA_TX)
756 di->xmtptrbase = pa;
757 else
758 di->rcvptrbase = pa;
759 }
760
761 if ((di->ddoffsetlow == 0)
762 || !(pa & PCI32ADDR_HIGH)) {
763 if (direction == DMA_TX) {
764 W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
765 W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
766 } else {
767 W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
768 W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
769 }
770 } else {
771 /* DMA64 32bits address extension */
772 u32 ae;
773
774 /* shift the high bit(s) from pa to ae */
775 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
776 pa &= ~PCI32ADDR_HIGH;
777
778 if (direction == DMA_TX) {
779 W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
780 W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
781 SET_REG(&di->d64txregs->control,
782 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
783 } else {
784 W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
785 W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
786 SET_REG(&di->d64rxregs->control,
787 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
788 }
789 }
790 }
791
792 static void _dma_rxenable(struct dma_info *di)
793 {
794 uint dmactrlflags = di->dma.dmactrlflags;
795 u32 control;
796
797 DMA_TRACE(("%s: dma_rxenable\n", di->name));
798
799 control =
800 (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
801 D64_RC_RE;
802
803 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
804 control |= D64_RC_PD;
805
806 if (dmactrlflags & DMA_CTRL_ROC)
807 control |= D64_RC_OC;
808
809 W_REG(&di->d64rxregs->control,
810 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
811 }
812
813 void dma_rxinit(struct dma_pub *pub)
814 {
815 struct dma_info *di = (struct dma_info *)pub;
816
817 DMA_TRACE(("%s: dma_rxinit\n", di->name));
818
819 if (di->nrxd == 0)
820 return;
821
822 di->rxin = di->rxout = 0;
823
824 /* clear rx descriptor ring */
825 memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));
826
827 /* A DMA engine without an alignment requirement requires the table to be
828 * initialized before enabling the engine
829 */
830 if (!di->aligndesc_4k)
831 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
832
833 _dma_rxenable(di);
834
835 if (di->aligndesc_4k)
836 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
837 }
838
839 static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
840 {
841 uint i, curr;
842 struct sk_buff *rxp;
843 dma_addr_t pa;
844
845 i = di->rxin;
846
847 /* return if no packets posted */
848 if (i == di->rxout)
849 return NULL;
850
851 curr =
852 B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
853 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
854
855 /* ignore curr if forceall */
856 if (!forceall && (i == curr))
857 return NULL;
858
859 /* get the packet pointer that corresponds to the rx descriptor */
860 rxp = di->rxp[i];
861 di->rxp[i] = NULL;
862
863 pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
864
865 /* clear this packet from the descriptor ring */
866 pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
867
868 di->rxd64[i].addrlow = 0xdeadbeef;
869 di->rxd64[i].addrhigh = 0xdeadbeef;
870
871 di->rxin = NEXTRXD(i);
872
873 return rxp;
874 }
875
876 static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
877 {
878 if (di->nrxd == 0)
879 return NULL;
880
881 return dma64_getnextrxp(di, forceall);
882 }
883
884 /*
885 * !! rx entry routine
886 * Returns a pointer to the next frame received, or NULL if there are no more.
887 * If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is
888 * supported with a packet chain;
889 * otherwise the frame is treated as a giant packet and is tossed.
890 * DMA scattering starts with a normal DMA header, followed by the first
891 * buffer's data. Once the maximum buffer size is reached, the data continues
892 * in the next DMA descriptor buffer WITHOUT a DMA header.
893 */
894 struct sk_buff *dma_rx(struct dma_pub *pub)
895 {
896 struct dma_info *di = (struct dma_info *)pub;
897 struct sk_buff *p, *head, *tail;
898 uint len;
899 uint pkt_len;
900 int resid = 0;
901
902 next_frame:
903 head = _dma_getnextrxp(di, false);
904 if (head == NULL)
905 return NULL;
906
907 len = le16_to_cpu(*(u16 *) (head->data));
908 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
909 dma_spin_for_len(len, head);
910
911 /* set actual length */
912 pkt_len = min((di->rxoffset + len), di->rxbufsize);
913 __skb_trim(head, pkt_len);
914 resid = len - (di->rxbufsize - di->rxoffset);
915
916 /* check for single or multi-buffer rx */
917 if (resid > 0) {
918 tail = head;
919 while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
920 tail->next = p;
921 pkt_len = min_t(uint, resid, di->rxbufsize);
922 __skb_trim(p, pkt_len);
923
924 tail = p;
925 resid -= di->rxbufsize;
926 }
927
928 #ifdef BCMDBG
929 if (resid > 0) {
930 uint cur;
931 cur =
932 B2I(((R_REG(&di->d64rxregs->status0) &
933 D64_RS0_CD_MASK) -
934 di->rcvptrbase) & D64_RS0_CD_MASK,
935 struct dma64desc);
936 DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
937 di->rxin, di->rxout, cur));
938 }
939 #endif /* BCMDBG */
940
941 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
942 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
943 di->name, len));
944 brcmu_pkt_buf_free_skb(head);
945 di->dma.rxgiants++;
946 goto next_frame;
947 }
948 }
949
950 return head;
951 }
952
953 static bool dma64_rxidle(struct dma_info *di)
954 {
955 DMA_TRACE(("%s: dma_rxidle\n", di->name));
956
957 if (di->nrxd == 0)
958 return true;
959
960 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
961 (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
962 }
963
964 /*
965 * post receive buffers
966 * return true if the refill failed completely and the ring is empty; this will
967 * stall the rx dma and the user might want to call rxfill again asap. This is
968 * unlikely on a memory-rich NIC, but common on a memory-constrained dongle
969 */
970 bool dma_rxfill(struct dma_pub *pub)
971 {
972 struct dma_info *di = (struct dma_info *)pub;
973 struct sk_buff *p;
974 u16 rxin, rxout;
975 u32 flags = 0;
976 uint n;
977 uint i;
978 dma_addr_t pa;
979 uint extra_offset = 0;
980 bool ring_empty;
981
982 ring_empty = false;
983
984 /*
985 * Determine how many receive buffers we're lacking
986 * from the full complement, allocate, initialize,
987 * and post them, then update the chip rx lastdscr.
988 */
989
990 rxin = di->rxin;
991 rxout = di->rxout;
992
993 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
994
995 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
996
997 if (di->rxbufsize > BCMEXTRAHDROOM)
998 extra_offset = di->rxextrahdrroom;
999
1000 for (i = 0; i < n; i++) {
1001 /*
1002 * the di->rxbufsize doesn't include the extra headroom,
1003 * we need to add it to the size to be allocated
1004 */
1005 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
1006
1007 if (p == NULL) {
1008 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
1009 di->name));
1010 if (i == 0 && dma64_rxidle(di)) {
1011 DMA_ERROR(("%s: rxfill64: ring is empty !\n",
1012 di->name));
1013 ring_empty = true;
1014 }
1015 di->dma.rxnobuf++;
1016 break;
1017 }
1018 /* reserve an extra headroom, if applicable */
1019 if (extra_offset)
1020 skb_pull(p, extra_offset);
1021
1022 /* Do a cached write instead of uncached write since DMA_MAP
1023 * will flush the cache.
1024 */
1025 *(u32 *) (p->data) = 0;
1026
1027 pa = pci_map_single(di->pbus, p->data,
1028 di->rxbufsize, PCI_DMA_FROMDEVICE);
1029
1030 /* save the free packet pointer */
1031 di->rxp[rxout] = p;
1032
1033 /* reset flags for each descriptor */
1034 flags = 0;
1035 if (rxout == (di->nrxd - 1))
1036 flags = D64_CTRL1_EOT;
1037
1038 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
1039 di->rxbufsize);
1040 rxout = NEXTRXD(rxout);
1041 }
1042
1043 di->rxout = rxout;
1044
1045 /* update the chip lastdscr pointer */
1046 W_REG(&di->d64rxregs->ptr,
1047 di->rcvptrbase + I2B(rxout, struct dma64desc));
1048
1049 return ring_empty;
1050 }
1051
1052 void dma_rxreclaim(struct dma_pub *pub)
1053 {
1054 struct dma_info *di = (struct dma_info *)pub;
1055 struct sk_buff *p;
1056
1057 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
1058
1059 while ((p = _dma_getnextrxp(di, true)))
1060 brcmu_pkt_buf_free_skb(p);
1061 }
1062
1063 void dma_counterreset(struct dma_pub *pub)
1064 {
1065 /* reset all software counters */
1066 pub->rxgiants = 0;
1067 pub->rxnobuf = 0;
1068 pub->txnobuf = 0;
1069 }
1070
1071 /* get the address of the variable so it can be changed later */
1072 unsigned long dma_getvar(struct dma_pub *pub, const char *name)
1073 {
1074 struct dma_info *di = (struct dma_info *)pub;
1075
1076 if (!strcmp(name, "&txavail"))
1077 return (unsigned long)&(di->dma.txavail);
1078 return 0;
1079 }
1080
1081 /* 64-bit DMA functions */
1082
1083 void dma_txinit(struct dma_pub *pub)
1084 {
1085 struct dma_info *di = (struct dma_info *)pub;
1086 u32 control = D64_XC_XE;
1087
1088 DMA_TRACE(("%s: dma_txinit\n", di->name));
1089
1090 if (di->ntxd == 0)
1091 return;
1092
1093 di->txin = di->txout = 0;
1094 di->dma.txavail = di->ntxd - 1;
1095
1096 /* clear tx descriptor ring */
1097 memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
1098
1099 /* A DMA engine without an alignment requirement requires the table to be
1100 * initialized before enabling the engine
1101 */
1102 if (!di->aligndesc_4k)
1103 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1104
1105 if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
1106 control |= D64_XC_PD;
1107 OR_REG(&di->d64txregs->control, control);
1108
1109 /* A DMA engine with an alignment requirement requires the table to be
1110 * initialized after the engine has been enabled
1111 */
1112 if (di->aligndesc_4k)
1113 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1114 }
1115
1116 void dma_txsuspend(struct dma_pub *pub)
1117 {
1118 struct dma_info *di = (struct dma_info *)pub;
1119
1120 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1121
1122 if (di->ntxd == 0)
1123 return;
1124
1125 OR_REG(&di->d64txregs->control, D64_XC_SE);
1126 }
1127
1128 void dma_txresume(struct dma_pub *pub)
1129 {
1130 struct dma_info *di = (struct dma_info *)pub;
1131
1132 DMA_TRACE(("%s: dma_txresume\n", di->name));
1133
1134 if (di->ntxd == 0)
1135 return;
1136
1137 AND_REG(&di->d64txregs->control, ~D64_XC_SE);
1138 }
1139
1140 bool dma_txsuspended(struct dma_pub *pub)
1141 {
1142 struct dma_info *di = (struct dma_info *)pub;
1143
1144 return (di->ntxd == 0) ||
1145 ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
1146 D64_XC_SE);
1147 }
1148
1149 void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
1150 {
1151 struct dma_info *di = (struct dma_info *)pub;
1152 struct sk_buff *p;
1153
1154 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1155 (range == DMA_RANGE_ALL) ? "all" :
1156 ((range ==
1157 DMA_RANGE_TRANSMITTED) ? "transmitted" :
1158 "transferred")));
1159
1160 if (di->txin == di->txout)
1161 return;
1162
1163 while ((p = dma_getnexttxp(pub, range))) {
1164 /* For unframed data, we don't have any packets to free */
1165 if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
1166 brcmu_pkt_buf_free_skb(p);
1167 }
1168 }
1169
1170 bool dma_txreset(struct dma_pub *pub)
1171 {
1172 struct dma_info *di = (struct dma_info *)pub;
1173 u32 status;
1174
1175 if (di->ntxd == 0)
1176 return true;
1177
1178 /* suspend tx DMA first */
1179 W_REG(&di->d64txregs->control, D64_XC_SE);
1180 SPINWAIT(((status =
1181 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1182 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
1183 && (status != D64_XS0_XS_STOPPED), 10000);
1184
1185 W_REG(&di->d64txregs->control, 0);
1186 SPINWAIT(((status =
1187 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1188 != D64_XS0_XS_DISABLED), 10000);
1189
1190 /* wait for the last transaction to complete */
1191 udelay(300);
1192
1193 return status == D64_XS0_XS_DISABLED;
1194 }
1195
1196 bool dma_rxreset(struct dma_pub *pub)
1197 {
1198 struct dma_info *di = (struct dma_info *)pub;
1199 u32 status;
1200
1201 if (di->nrxd == 0)
1202 return true;
1203
1204 W_REG(&di->d64rxregs->control, 0);
1205 SPINWAIT(((status =
1206 (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
1207 != D64_RS0_RS_DISABLED), 10000);
1208
1209 return status == D64_RS0_RS_DISABLED;
1210 }
1211
1212 /*
1213 * !! tx entry routine
1214 * WARNING: the caller must check the return value for errors.
1215 * An error (tossed frames) could be fatal and cause many subsequent
1216 * hard-to-debug problems
1217 */
1218 int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
1219 {
1220 struct dma_info *di = (struct dma_info *)pub;
1221 struct sk_buff *p, *next;
1222 unsigned char *data;
1223 uint len;
1224 u16 txout;
1225 u32 flags = 0;
1226 dma_addr_t pa;
1227
1228 DMA_TRACE(("%s: dma_txfast\n", di->name));
1229
1230 txout = di->txout;
1231
1232 /*
1233 * Walk the chain of packet buffers
1234 * allocating and initializing transmit descriptor entries.
1235 */
1236 for (p = p0; p; p = next) {
1237 data = p->data;
1238 len = p->len;
1239 next = p->next;
1240
1241 /* return nonzero if out of tx descriptors */
1242 if (NEXTTXD(txout) == di->txin)
1243 goto outoftxd;
1244
1245 if (len == 0)
1246 continue;
1247
1248 /* get physical address of buffer start */
1249 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
1250
1251 flags = 0;
1252 if (p == p0)
1253 flags |= D64_CTRL1_SOF;
1254
1255 /* With a DMA segment list, the descriptor table is filled
1256 * using the segment list instead of looping over
1257 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
1258 * is when the end of the segment list is reached.
1259 */
1260 if (next == NULL)
1261 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
1262 if (txout == (di->ntxd - 1))
1263 flags |= D64_CTRL1_EOT;
1264
1265 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1266
1267 txout = NEXTTXD(txout);
1268 }
1269
1270 /* if last txd eof not set, fix it */
1271 if (!(flags & D64_CTRL1_EOF))
1272 di->txd64[PREVTXD(txout)].ctrl1 =
1273 cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
1274
1275 /* save the packet */
1276 di->txp[PREVTXD(txout)] = p0;
1277
1278 /* bump the tx descriptor index */
1279 di->txout = txout;
1280
1281 /* kick the chip */
1282 if (commit)
1283 W_REG(&di->d64txregs->ptr,
1284 di->xmtptrbase + I2B(txout, struct dma64desc));
1285
1286 /* tx flow control */
1287 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1288
1289 return 0;
1290
1291 outoftxd:
1292 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
1293 brcmu_pkt_buf_free_skb(p0);
1294 di->dma.txavail = 0;
1295 di->dma.txnobuf++;
1296 return -1;
1297 }
1298
1299 /*
1300 * Reclaim next completed txd (txds if using chained buffers) in the range
1301 * specified and return associated packet.
1302 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1303 * transmitted as noted by the hardware "CurrDescr" pointer.
1304 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
1305 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1306 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1307 * return associated packet regardless of the value of hardware pointers.
1308 */
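/*
 * For illustration, with hypothetical register values: if status0 reports
 * CD = 0x140 while xmtptrbase = 0x100, then end = B2I((0x140 - 0x100) &
 * D64_XS0_CD_MASK, struct dma64desc) = 4, so descriptors txin .. 3 are
 * reclaimed.
 */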
1309 struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1310 {
1311 struct dma_info *di = (struct dma_info *)pub;
1312 u16 start, end, i;
1313 u16 active_desc;
1314 struct sk_buff *txp;
1315
1316 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1317 (range == DMA_RANGE_ALL) ? "all" :
1318 ((range ==
1319 DMA_RANGE_TRANSMITTED) ? "transmitted" :
1320 "transferred")));
1321
1322 if (di->ntxd == 0)
1323 return NULL;
1324
1325 txp = NULL;
1326
1327 start = di->txin;
1328 if (range == DMA_RANGE_ALL)
1329 end = di->txout;
1330 else {
1331 struct dma64regs *dregs = di->d64txregs;
1332
1333 end = (u16) (B2I(((R_REG(&dregs->status0) &
1334 D64_XS0_CD_MASK) -
1335 di->xmtptrbase) & D64_XS0_CD_MASK,
1336 struct dma64desc));
1337
1338 if (range == DMA_RANGE_TRANSFERED) {
1339 active_desc =
1340 (u16) (R_REG(&dregs->status1) &
1341 D64_XS1_AD_MASK);
1342 active_desc =
1343 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
1344 active_desc = B2I(active_desc, struct dma64desc);
1345 if (end != active_desc)
1346 end = PREVTXD(active_desc);
1347 }
1348 }
1349
1350 if ((start == 0) && (end > di->txout))
1351 goto bogus;
1352
1353 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1354 dma_addr_t pa;
1355 uint size;
1356
1357 pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;
1358
1359 size =
1360 (le32_to_cpu(di->txd64[i].ctrl2) &
1361 D64_CTRL2_BC_MASK);
1362
1363 di->txd64[i].addrlow = 0xdeadbeef;
1364 di->txd64[i].addrhigh = 0xdeadbeef;
1365
1366 txp = di->txp[i];
1367 di->txp[i] = NULL;
1368
1369 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
1370 }
1371
1372 di->txin = i;
1373
1374 /* tx flow control */
1375 di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1376
1377 return txp;
1378
1379 bogus:
1380 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d\n",
1381 start, end, di->txout));
1382 return NULL;
1383 }
1384
1385 /*
1386 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
1387 * modified. The modified portion of the packet is not under control of the DMA
1388 * engine. This function calls a caller-supplied function for each packet in
1389 * the caller specified dma chain.
1390 */
1391 void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
1392 (void *pkt, void *arg_a), void *arg_a)
1393 {
1394 struct dma_info *di = (struct dma_info *) dmah;
1395 uint i = di->txin;
1396 uint end = di->txout;
1397 struct sk_buff *skb;
1398 struct ieee80211_tx_info *tx_info;
1399
1400 while (i != end) {
1401 skb = (struct sk_buff *)di->txp[i];
1402 if (skb != NULL) {
1403 tx_info = (struct ieee80211_tx_info *)skb->cb;
1404 (callback_fnc)(tx_info, arg_a);
1405 }
1406 i = NEXTTXD(i);
1407 }
1408 }