/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "dma.h"
#include "soc.h"

/*
 * dma register field offset calculation
 */
#define DMA64REGOFFS(field)		offsetof(struct dma64regs, field)
#define DMA64TXREGOFFS(di, field)	(di->d64txregbase + DMA64REGOFFS(field))
#define DMA64RXREGOFFS(di, field)	(di->d64rxregbase + DMA64REGOFFS(field))

/*
 * DMA hardware requires each descriptor ring to be 8kB aligned, and to fit
 * within a contiguous 8kB physical address range.
 */
#define D64RINGALIGN_BITS	13
#define D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))
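
/*
 * Illustrative note (not in the original source): struct dma64desc below is
 * four __le32 words, i.e. 16 bytes, so D64MAXDD works out to
 * 8192 / 16 = 512 descriptors per ring at most.
 */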

/* transmit channel control */
#define D64_XC_XE		0x00000001	/* transmit enable */
#define D64_XC_SE		0x00000002	/* transmit suspend request */
#define D64_XC_LE		0x00000004	/* loopback enable */
#define D64_XC_FL		0x00000010	/* flush request */
#define D64_XC_PD		0x00000800	/* parity check disable */
#define D64_XC_AE		0x00030000	/* address extension bits */
#define D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define D64_XS0_XS_SHIFT	28
#define D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define D64_XS1_XE_SHIFT	28
#define D64_XS1_XE_NOERR	0x00000000	/* no error */
#define D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define D64_XS1_XE_COREE	0x50000000	/* core error */

/* receive channel control */
/* receive enable */
#define D64_RC_RE		0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK		0x000000fe
#define D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH		0x00000200
/* overflow continue */
#define D64_RC_OC		0x00000400
/* parity check disable */
#define D64_RC_PD		0x00000800
/* address extension bits */
#define D64_RC_AE		0x00030000
#define D64_RC_AE_SHIFT		16

/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)

/* receive descriptor table pointer */
#define D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define D64_RS0_RS_SHIFT	28
#define D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define D64_RS1_RE_SHIFT	28
#define D64_RS1_RE_NOERR	0x00000000	/* no error */
#define D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define D64_RS1_RE_COREE	0x50000000	/* core error */

/* fifoaddr */
#define D64_FA_OFF_MASK		0xffff		/* offset */
#define D64_FA_SEL_MASK		0xf0000		/* select */
#define D64_FA_SEL_SHIFT	16
#define D64_FA_SEL_XDD		0x00000		/* transmit dma data */
#define D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
#define D64_FA_SEL_RDD		0x40000		/* receive dma data */
#define D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
#define D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
#define D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
#define D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
#define D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
#define D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
#define D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count. real data len must be <= 16KB */
#define D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define D64_CTRL2_AE		0x00030000
#define D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK	0x0ff00000

#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */

/*
 * packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing so, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */

#define BCMEXTRAHDROOM 172

/* debug/trace */
#ifdef DEBUG
#define DMA_ERROR(fmt, ...)					\
do {								\
	if (*di->msg_level & 1)					\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
} while (0)
#define DMA_TRACE(fmt, ...)					\
do {								\
	if (*di->msg_level & 2)					\
		pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);	\
} while (0)
#else
#define DMA_ERROR(fmt, ...) \
	no_printk(fmt, ##__VA_ARGS__)
#define DMA_TRACE(fmt, ...) \
	no_printk(fmt, ##__VA_ARGS__)
#endif				/* DEBUG */

#define DMA_NONE(fmt, ...) \
	no_printk(fmt, ##__VA_ARGS__)

#define MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
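
/*
 * Illustrative note (not in the original source): the hardware ptr/CD
 * registers hold byte offsets into the descriptor ring, so e.g. a status
 * offset of 0x50 maps to descriptor index B2I(0x50, struct dma64desc) == 5,
 * while posting up to descriptor 5 writes I2B(5, struct dma64desc) == 0x50.
 */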

#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31		/* address[63] */

/*
 * DMA Descriptor
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;	/* misc control bits & bufcount */
	__le32 ctrl2;	/* buffer count and address extension */
	__le32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
};

/* dma engine software state */
struct dma_info {
	struct dma_pub dma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	struct bcma_device *core;
	struct device *dmadev;

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	uint d64txregbase;
	/* 64-bit dma rx engine registers */
	uint d64rxregbase;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.
			 */

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper
				 * stack, e.g. some rx pkt buffers will be
				 * bridged to tx side without byte copying.
				 * The extra headroom needs to be large enough
				 * to fit txheader needs. Some dongle driver may
				 * not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsetlow;
	/* high 32 bits */
	uint ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsetlow;
	/* high 32 bits */
	uint dataoffsethigh;
	/* does the descriptor base need to be aligned or not */
	bool aligndesc_4k;
};

/*
 * default dma message level (if input msg_level
 * pointer is null in dma_attach())
 */
static uint dma_msg_level;

/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}

static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}
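
/*
 * Illustrative note (not in the original source): parity32() XOR-folds the
 * word onto itself, so bit 0 of the result is the XOR of all 32 bits, e.g.
 * parity32 of 0x00000003 is 0 and of 0x00000007 is 1. dma64_dd_parity()
 * therefore reports whether a descriptor holds an odd number of 1 bits;
 * when it does, dma64_dd_upd() later sets D64_CTRL2_PARITY so the hardware
 * always sees even parity across the descriptor.
 */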

/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1);	/* faster than %, but n must be power of 2 */
}

static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}

static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}

static uint nextrxd(struct dma_info *di, uint i)
{
	/* wrap on the rx ring size, not the tx ring size */
	return rxd(di, i + 1);
}

static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t - h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t - h);
}
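
/*
 * Illustrative note (not in the original source): the masked subtraction
 * handles ring wraparound. With a 64-entry ring, ntxdactive() for
 * txin == 60 and txout == 4 returns (4 - 60) & 63 == 8 descriptors
 * still in flight.
 */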

static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL) {
		DMA_ERROR("NULL dma handle\n");
		return 0;
	}

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
		bcma_write32(di->core, DMA64TXREGOFFS(di, control),
			     control | D64_XC_PD);
		if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
		    D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			bcma_write32(di->core, DMA64TXREGOFFS(di, control),
				     control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}

static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
	u32 w;

	bcma_set32(di->core, ctrl_offset, D64_XC_AE);
	w = bcma_read32(di->core, ctrl_offset);
	bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}

/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregbase != 0) {
		if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
			DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
				  di->name);
		return true;
	} else if (di->d64rxregbase != 0) {
		if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
			DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
				  di->name);
		return true;
	}

	return false;
}

static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregbase != 0) {
		bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	} else if (di->d64rxregbase != 0) {
		bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	}
	return true;
}

/*
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct dma_info *di, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}

static
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	while (size >>= 1)
		bitpos++;
	return bitpos;
}
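
/*
 * Illustrative note (not in the original source): this computes
 * floor(log2(size)), e.g. dma_align_sizetobits(8192) == 13, matching
 * D64RINGALIGN_BITS for a maximally sized ring.
 */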

/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page
 * boundary, it is freed and the allocation is redone at a descriptor-ring-
 * size aligned location, which guarantees the ring does not cross one.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		dma_free_coherent(di->dmadev, size, va, *descpa);
		va = dma_alloc_consistent(di, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}
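
/*
 * Illustrative note (not in the original source): dma64_alloc() passes
 * D64RINGALIGN (0x2000) as "boundary", so the test above asks whether bit
 * 13 differs between the first and last byte of the ring, i.e. whether an
 * 8 KiB boundary is crossed. For a 256-entry ring (256 * 16 = 4096 bytes),
 * the retry uses *alignbits == dma_align_sizetobits(4096) == 12; a 4 KiB
 * aligned 4 KiB ring can never straddle an 8 KiB boundary.
 */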

static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				  di->name);
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
					roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				  di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
					roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}

struct dma_pub *dma_attach(char *name, struct si_pub *sih,
			   struct bcma_device *core,
			   uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
			   uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset, uint *msg_level)
{
	struct dma_info *di;
	u8 rev = core->id.rev;
	uint size;
	struct si_info *sii = container_of(sih, struct si_info, pub);

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 =
		((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg info */
	di->core = core;
	di->d64txregbase = txregbase;
	di->d64rxregbase = rxregbase;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
		  "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		  "txregbase %u rxregbase %u\n", name, "DMA64",
		  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		  rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->dmadev = core->dma_dev;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero
	 *     based memory, need offset
	 *     Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram
	 *     swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
		/* add offset for pcie with DMA64 bus */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
	}
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;

	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((core->id.id == BCMA_CORE_SDIO_DEV)
	    && ((rev > 0) && (rev <= 2)))
		di->addrext = false;
	else if ((core->id.id == BCMA_CORE_I2S) &&
		 ((rev == 0) || (rev == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for smaller dd table, HW relaxes the alignment
			 * requirement
			 */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	DMA_NONE("DMA descriptor align_needed %d, align %d\n",
		 di->aligndesc_4k, di->dmadesc_align);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
				  di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
				  di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		  di->ddoffsetlow, di->ddoffsethigh,
		  di->dataoffsetlow, di->dataoffsethigh,
		  di->addrext);

	return (struct dma_pub *) di;

fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}

static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			     cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}
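
/*
 * Illustrative note (not in the original source): with address extension,
 * a buffer at pa == 0xC0001000 yields ae == 0xC0001000 >> 30 == 3; pa is
 * masked down to 0x00001000 for addrlow, and the two stripped address bits
 * travel in the D64_CTRL2_AE field (3 << 16) instead.
 */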

/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		dma_free_coherent(di->dmadev, di->txdalloc,
				  ((s8 *)di->txd64 - di->txdalign),
				  (di->txdpaorig));
	if (di->rxd64)
		dma_free_coherent(di->dmadev, di->rxdalloc,
				  ((s8 *)di->rxd64 - di->rxdalign),
				  (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		}
	} else {
		/* DMA64 32bits address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}

static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	DMA_TRACE("%s:\n", di->name);

	control = D64_RC_RE | (bcma_read32(di->core,
					   DMA64RXREGOFFS(di, control)) &
			       D64_RC_AE);

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
		     ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}

void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* DMA engine without an alignment requirement requires the table to
	 * be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((bcma_read32(di->core,
			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);

	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}

/*
 * !! rx entry routine
 * Returns the number of packets in the next frame, or 0 if there are no more.
 * If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is supported
 * with a pkt chain; otherwise the frame is treated as a giant pkt and is
 * tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer's data. When the data reaches the max size of a buffer, it continues
 * in the next DMA descriptor buffer WITHOUT a DMA header.
 */
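
/*
 * Illustrative example (not in the original source): with rxbufsize == 2048,
 * rxoffset == 30 and a received frame length of 4000, the first buffer is
 * trimmed to min(30 + 4000, 2048) == 2048 bytes, leaving
 * resid == 4000 - (2048 - 30) == 1982 bytes that land in a second buffer,
 * so dma_rx() returns pktcnt == 2 when DMA_CTRL_RXMULTI is set.
 */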
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	len = le16_to_cpu(*(__le16 *) (p->data));
	DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((bcma_read32(di->core,
					      DMA64RXREGOFFS(di, status0)) &
				  D64_RS0_CD_MASK) - di->rcvptrbase) &
				D64_RS0_CD_MASK, struct dma64desc);
			DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
				  di->rxin, di->rxout, cur);
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR("%s: bad frame length (%d)\n",
				  di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}

static bool dma64_rxidle(struct dma_info *di)
{
	DMA_TRACE("%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
		 D64_RS0_CD_MASK));
}

/*
 * post receive buffers
 * Returns false if the refill failed completely and the ring is empty; this
 * will stall the rx dma, and the caller may want to call rxfill again asap.
 * This rarely happens on a memory-rich NIC, but often on a
 * memory-constrained dongle.
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	DMA_TRACE("%s: post %d\n", di->name, n);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR("%s: out of rxbufs\n", di->name);
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR("%s: ring is empty !\n", di->name);
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
				    DMA_FROM_DEVICE);

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
		     di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}

void dma_rxreclaim(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE("%s:\n", di->name);

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}

void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}

/* get the address of the var in order to change later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = (struct dma_info *)pub;

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}

/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 control = D64_XC_XE;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* DMA engine without an alignment requirement requires the table to
	 * be initialized before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);

	/* DMA engine with an alignment requirement requires the table to be
	 * initialized after enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}

void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	DMA_TRACE("%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}

bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	return (di->ntxd == 0) ||
	       ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
		D64_XC_SE);
}

void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	DMA_TRACE("%s: %s\n",
		  di->name,
		  range == DMA_RANGE_ALL ? "all" :
		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		  "transferred");

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}

bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
		 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
		 10000);

	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}

bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->nrxd == 0)
		return true;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}

/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 * An error (tossed frames) can be fatal and cause many subsequent
 * hard-to-debug problems.
 */
int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
	struct dma_info *di = (struct dma_info *)pub;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	DMA_TRACE("%s:\n", di->name);

	txout = di->txout;

	/*
	 * obtain and initialize transmit descriptor entry.
	 */
	data = p->data;
	len = p->len;

	/* no use to transmit a zero length packet */
	if (len == 0)
		return 0;

	/* return nonzero if out of tx descriptors */
	if (nexttxd(di, txout) == di->txin)
		goto outoftxd;

	/* get physical address of buffer start */
	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);

	/* With a DMA segment list, Descriptor table is filled
	 * using the segment list instead of looping over
	 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
	 * is when end of segment list is reached.
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

	txout = nexttxd(di, txout);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
			     di->xmtptrbase + I2B(txout, struct dma64desc));

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return 0;

outoftxd:
	DMA_ERROR("%s: out of txds !!!\n", di->name);
	brcmu_pkt_buf_free_skb(p);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -1;
}
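
/*
 * Illustrative note (not in the original source): txavail deliberately
 * leaves one descriptor unused (the "- 1" above). A completely full ring
 * would make txin == txout, which is indistinguishable from an empty one,
 * which is why dma_txfast() refuses a post when
 * nexttxd(di, txout) == di->txin.
 */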

/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return the associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return the associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	DMA_TRACE("%s: %s\n",
		  di->name,
		  range == DMA_RANGE_ALL ? "all" :
		  range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		  "transferred");

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		end = (u16) (B2I(((bcma_read32(di->core,
					       DMA64TXREGOFFS(di, status0)) &
				   D64_XS0_CD_MASK) - di->xmtptrbase) &
				 D64_XS0_CD_MASK, struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16)(bcma_read32(di->core,
					      DMA64TXREGOFFS(di, status1)) &
				  D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size =
		    (le32_to_cpu(di->txd64[i].ctrl2) &
		     D64_CTRL2_BC_MASK);

		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;

	return txp;

bogus:
	DMA_NONE("bogus curr: start %d end %d txout %d\n",
		 start, end, di->txout);
	return NULL;
}

/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = (struct dma_info *) dmah;
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}