/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>

#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
#include "main.h"
#include "dma.h"
#include "soc.h"
#include "scb.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/*
 * dma register field offset calculation
 */
#define DMA64REGOFFS(field)		offsetof(struct dma64regs, field)
#define DMA64TXREGOFFS(di, field)	(di->d64txregbase + DMA64REGOFFS(field))
#define DMA64RXREGOFFS(di, field)	(di->d64rxregbase + DMA64REGOFFS(field))

/*
 * DMA hardware requires each descriptor ring to be 8kB aligned, and to fit
 * within a contiguous 8kB physical address range.
 */
#define D64RINGALIGN_BITS	13
#define D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
#define D64RINGALIGN		(1 << D64RINGALIGN_BITS)

#define D64MAXDD	(D64MAXRINGSZ / sizeof(struct dma64desc))

/* transmit channel control */
#define D64_XC_XE		0x00000001	/* transmit enable */
#define D64_XC_SE		0x00000002	/* transmit suspend request */
#define D64_XC_LE		0x00000004	/* loopback enable */
#define D64_XC_FL		0x00000010	/* flush request */
#define D64_XC_PD		0x00000800	/* parity check disable */
#define D64_XC_AE		0x00030000	/* address extension bits */
#define D64_XC_AE_SHIFT		16

/* transmit descriptor table pointer */
#define D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */

/* transmit channel status */
#define D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_XS0_XS_MASK		0xf0000000	/* transmit state */
#define D64_XS0_XS_SHIFT	28
#define D64_XS0_XS_DISABLED	0x00000000	/* disabled */
#define D64_XS0_XS_ACTIVE	0x10000000	/* active */
#define D64_XS0_XS_IDLE		0x20000000	/* idle wait */
#define D64_XS0_XS_STOPPED	0x30000000	/* stopped */
#define D64_XS0_XS_SUSP		0x40000000	/* suspend pending */

#define D64_XS1_AD_MASK		0x00001fff	/* active descriptor */
#define D64_XS1_XE_MASK		0xf0000000	/* transmit errors */
#define D64_XS1_XE_SHIFT	28
#define D64_XS1_XE_NOERR	0x00000000	/* no error */
#define D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
#define D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
#define D64_XS1_XE_DTE		0x30000000	/* data transfer error */
#define D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
#define D64_XS1_XE_COREE	0x50000000	/* core error */

/* receive channel control */
/* receive enable */
#define D64_RC_RE		0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK		0x000000fe
#define D64_RC_RO_SHIFT		1
/* direct fifo receive (pio) mode */
#define D64_RC_FM		0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH		0x00000200
/* overflow continue */
#define D64_RC_OC		0x00000400
/* parity check disable */
#define D64_RC_PD		0x00000800
/* address extension bits */
#define D64_RC_AE		0x00030000
#define D64_RC_AE_SHIFT		16

/* flags for dma controller */
/* parity enable */
#define DMA_CTRL_PEN		(1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC		(1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI	(1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED	(1 << 3)

/* receive descriptor table pointer */
#define D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */

/* receive channel status */
#define D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
#define D64_RS0_RS_MASK		0xf0000000	/* receive state */
#define D64_RS0_RS_SHIFT	28
#define D64_RS0_RS_DISABLED	0x00000000	/* disabled */
#define D64_RS0_RS_ACTIVE	0x10000000	/* active */
#define D64_RS0_RS_IDLE		0x20000000	/* idle wait */
#define D64_RS0_RS_STOPPED	0x30000000	/* stopped */
#define D64_RS0_RS_SUSP		0x40000000	/* suspend pending */

#define D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
#define D64_RS1_RE_MASK		0xf0000000	/* receive errors */
#define D64_RS1_RE_SHIFT	28
#define D64_RS1_RE_NOERR	0x00000000	/* no error */
#define D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
#define D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
#define D64_RS1_RE_DTE		0x30000000	/* data transfer error */
#define D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
#define D64_RS1_RE_COREE	0x50000000	/* core error */

/* fifoaddr */
#define D64_FA_OFF_MASK		0xffff	/* offset */
#define D64_FA_SEL_MASK		0xf0000	/* select */
#define D64_FA_SEL_SHIFT	16
#define D64_FA_SEL_XDD		0x00000	/* transmit dma data */
#define D64_FA_SEL_XDP		0x10000	/* transmit dma pointers */
#define D64_FA_SEL_RDD		0x40000	/* receive dma data */
#define D64_FA_SEL_RDP		0x50000	/* receive dma pointers */
#define D64_FA_SEL_XFD		0x80000	/* transmit fifo data */
#define D64_FA_SEL_XFP		0x90000	/* transmit fifo pointers */
#define D64_FA_SEL_RFD		0xc0000	/* receive fifo data */
#define D64_FA_SEL_RFP		0xd0000	/* receive fifo pointers */
#define D64_FA_SEL_RSD		0xe0000	/* receive frame status data */
#define D64_FA_SEL_RSP		0xf0000	/* receive frame status pointers */

/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
#define D64_CTRL1_EOT		((u32)1 << 28)	/* end of descriptor table */
#define D64_CTRL1_IOC		((u32)1 << 29)	/* interrupt on completion */
#define D64_CTRL1_EOF		((u32)1 << 30)	/* end of frame */
#define D64_CTRL1_SOF		((u32)1 << 31)	/* start of frame */

/* descriptor control flags 2 */
/* buffer byte count. real data len must be <= 16KB */
#define D64_CTRL2_BC_MASK	0x00007fff
/* address extension bits */
#define D64_CTRL2_AE		0x00030000
#define D64_CTRL2_AE_SHIFT	16
/* parity bit */
#define D64_CTRL2_PARITY	0x00040000

/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK	0x0ff00000

#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1 */
#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */

/*
 * Packet headroom necessary to accommodate the largest header
 * in the system (i.e. TXOFF). By doing this, we avoid the need to
 * allocate an extra buffer for the header when bridging to WL.
 * There is a compile time check in wlc.c which ensures that this
 * value is at least as big as TXOFF. This value is used in
 * dma_rxfill().
 */

#define BCMEXTRAHDROOM 172

#define MAXNAMEL	8	/* 8 char names */

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
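
/*
 * Illustrative example (added for clarity, not part of the original source):
 * struct dma64desc below is 16 bytes, so a hardware byte offset of 0x40 in
 * the descriptor table corresponds to ring index
 * B2I(0x40, struct dma64desc) == 4, and I2B(4, struct dma64desc) == 0x40
 * converts back when programming the "ptr" register.
 */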

#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

#define PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define PCI64ADDR_HIGH_SHIFT	31		/* address[63] */

/*
 * DMA Descriptor
 * Descriptors are only read by the hardware, never written back.
 */
struct dma64desc {
	__le32 ctrl1;	/* misc control bits & bufcount */
	__le32 ctrl2;	/* buffer count and address extension */
	__le32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	__le32 addrhigh; /* memory address of the data buffer, bits 63:32 */
};

/* dma engine software state */
struct dma_info {
	struct dma_pub dma;	/* exported structure */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	struct bcma_device *core;
	struct device *dmadev;

	/* session information for AMPDU */
	struct brcms_ampdu_session ampdu_session;

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	/* 64-bit dma tx engine registers */
	uint d64txregbase;
	/* 64-bit dma rx engine registers */
	uint d64rxregbase;
	/* pointer to dma64 tx descriptor ring */
	struct dma64desc *txd64;
	/* pointer to dma64 rx descriptor ring */
	struct dma64desc *rxd64;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;	/* # tx descriptors tunable */
	u16 txin;	/* index of next descriptor to reclaim */
	u16 txout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **txp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t txdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t txdpaorig;
	u16 txdalign;	/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;	/* #bytes allocated for the ring */
	u32 xmtptrbase;	/* When using unaligned descriptors, the ptr register
			 * is not just an index, it needs all 13 bits to be
			 * an offset from the addr register.
			 */

	u16 nrxd;	/* # rx descriptors tunable */
	u16 rxin;	/* index of next descriptor to reclaim */
	u16 rxout;	/* index of next descriptor to post */
	/* pointer to parallel array of pointers to packets */
	struct sk_buff **rxp;
	/* Aligned physical address of descriptor ring */
	dma_addr_t rxdpa;
	/* Original physical address of descriptor ring */
	dma_addr_t rxdpaorig;
	u16 rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;	/* #bytes allocated for the ring */
	u32 rcvptrbase;	/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes, not including
				 * the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist the
				 * upper stack, e.g. some rx pkt buffers will
				 * be bridged to the tx side without byte
				 * copying. The extra headroom needs to be
				 * large enough to fit txheader needs. Some
				 * dongle drivers may not need it.
				 */
	uint nrxpost;	/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsetlow;
	/* high 32 bits */
	uint ddoffsethigh;
	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsetlow;
	/* high 32 bits */
	uint dataoffsethigh;
	/* whether the descriptor base needs to be aligned or not */
	bool aligndesc_4k;
};

/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
	/* no swap needed for counting 1's */
	u32 par_data = *(u32 *)&data;

	par_data ^= par_data >> 16;
	par_data ^= par_data >> 8;
	par_data ^= par_data >> 4;
	par_data ^= par_data >> 2;
	par_data ^= par_data >> 1;

	return par_data & 1;
}

static bool dma64_dd_parity(struct dma64desc *dd)
{
	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
}
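
/*
 * Illustrative example (added for clarity, not part of the original source):
 * the xor-fold in parity32() reduces a word to the xor of all its bits, so
 * parity32(cpu_to_le32(0x3)) == 0 (two bits set, even parity) and
 * parity32(cpu_to_le32(0x7)) == 1 (three bits set, odd parity).
 * dma64_dd_upd() below sets D64_CTRL2_PARITY whenever the four descriptor
 * words have odd combined parity, making the descriptor's overall parity
 * even for the hardware's check.
 */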

/* descriptor bumping functions */

static uint xxd(uint x, uint n)
{
	return x & (n - 1); /* faster than %, but n must be power of 2 */
}

static uint txd(struct dma_info *di, uint x)
{
	return xxd(x, di->ntxd);
}

static uint rxd(struct dma_info *di, uint x)
{
	return xxd(x, di->nrxd);
}

static uint nexttxd(struct dma_info *di, uint i)
{
	return txd(di, i + 1);
}

static uint prevtxd(struct dma_info *di, uint i)
{
	return txd(di, i - 1);
}

static uint nextrxd(struct dma_info *di, uint i)
{
	return rxd(di, i + 1);
}

static uint ntxdactive(struct dma_info *di, uint h, uint t)
{
	return txd(di, t-h);
}

static uint nrxdactive(struct dma_info *di, uint h, uint t)
{
	return rxd(di, t-h);
}
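
/*
 * Illustrative example (added for clarity, not part of the original source):
 * with ntxd == 64, the mask arithmetic in xxd() wraps ring indices, so
 * nexttxd(di, 63) == 0, and for head txin == 62 and tail txout == 2,
 * ntxdactive(di, 62, 2) == (2 - 62) & 63 == 4 descriptors in flight; the
 * unsigned subtraction makes the wrap-around come out right.
 */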

static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
	uint dmactrlflags;

	if (di == NULL)
		return 0;

	dmactrlflags = di->dma.dmactrlflags;
	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
		bcma_write32(di->core, DMA64TXREGOFFS(di, control),
			     control | D64_XC_PD);
		if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
		    D64_XC_PD)
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			bcma_write32(di->core, DMA64TXREGOFFS(di, control),
				     control);
		else
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
	}

	di->dma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
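
/*
 * Illustrative usage (added for clarity, not part of the original source):
 * _dma_ctrlflags(di, DMA_CTRL_PEN, DMA_CTRL_PEN) requests parity and lets
 * the probe above silently drop the flag when the core cannot toggle
 * D64_XC_PD, while dma_attach() below calls
 * _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0) to clear both flags
 * as its backwards-compatible default.
 */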

static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
	u32 w;
	bcma_set32(di->core, ctrl_offset, D64_XC_AE);
	w = bcma_read32(di->core, ctrl_offset);
	bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}

/*
 * return true if this dma engine supports DmaExtendedAddrChanges,
 * otherwise false
 */
static bool _dma_isaddrext(struct dma_info *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregbase != 0) {
		if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
			brcms_dbg_dma(di->core,
				      "%s: DMA64 tx doesn't have AE set\n",
				      di->name);
		return true;
	} else if (di->d64rxregbase != 0) {
		if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
			brcms_dbg_dma(di->core,
				      "%s: DMA64 rx doesn't have AE set\n",
				      di->name);
		return true;
	}

	return false;
}

static bool _dma_descriptor_align(struct dma_info *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregbase != 0) {
		bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	} else if (di->d64rxregbase != 0) {
		bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
		addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
		if (addrl != 0)
			return false;
	}
	return true;
}

/*
 * Descriptor table must start at the DMA hardware dictated alignment, so
 * allocated memory must be large enough to support this requirement.
 */
static void *dma_alloc_consistent(struct dma_info *di, uint size,
				  u16 align_bits, uint *alloced,
				  dma_addr_t *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}

static
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	while (size >>= 1)
		bitpos++;
	return bitpos;
}
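
/*
 * Illustrative example (added for clarity, not part of the original source):
 * dma_align_sizetobits() computes floor(log2(size)), e.g. it maps
 * 8192 -> 13 and 4096 -> 12; for a full-size ring this matches
 * D64RINGALIGN_BITS, so realigning to the ring size keeps the ring inside
 * one naturally aligned region.
 */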

/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the first allocation does cross a page
 * boundary, it is freed and the allocation is redone at a location aligned
 * to the descriptor ring size, which guarantees the ring cannot cross a
 * page boundary.
 */
static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dma_addr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);

	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		dma_free_coherent(di->dmadev, size, va, *descpa);
		va = dma_alloc_consistent(di, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}

static bool dma64_alloc(struct dma_info *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(struct dma64desc);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		di->txdpa = di->txdpaorig + di->txdalign;
		di->txdalloc = alloced;
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			brcms_dbg_dma(di->core,
				      "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
				      di->name);
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (struct dma64desc *)
				roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		di->rxdpa = di->rxdpaorig + di->rxdalign;
		di->rxdalloc = alloced;
	}

	return true;
}

static bool _dma_alloc(struct dma_info *di, uint direction)
{
	return dma64_alloc(di, direction);
}

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase, uint ntxd,
			   uint nrxd, uint rxbufsize, int rxextheadroom,
			   uint nrxpost, uint rxoffset)
{
	struct si_pub *sih = wlc->hw->sih;
	struct bcma_device *core = wlc->hw->d11core;
	struct dma_info *di;
	u8 rev = core->id.rev;
	uint size;
	struct si_info *sii = container_of(sih, struct si_info, pub);

	/* allocate private info structure */
	di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
	if (di == NULL)
		return NULL;

	di->dma64 =
		((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg info */
	di->core = core;
	di->d64txregbase = txregbase;
	di->d64rxregbase = rxregbase;

	/*
	 * Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
		      "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		      "txregbase %u rxregbase %u\n", name, "DMA64",
		      di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
		      rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->dmadev = core->dma_dev;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane addresses to zero-based
	 *     memory, need offset
	 *     Other bus: use zero. SI_BUS BIGENDIAN kludge: use sdram
	 *     swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
		/* add offset for pcie with DMA64 bus */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
	}
	di->dataoffsetlow = di->ddoffsetlow;
	di->dataoffsethigh = di->ddoffsethigh;

	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((core->id.id == BCMA_CORE_SDIO_DEV)
	    && ((rev > 0) && (rev <= 2)))
		di->addrext = false;
	else if ((core->id.id == BCMA_CORE_I2S) &&
		 ((rev == 0) || (rev == 1)))
		di->addrext = false;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
	} else {
		di->dmadesc_align = 4;	/* 16 byte alignment */
	}

	brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
		      di->aligndesc_4k, di->dmadesc_align);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL)
			goto fail;
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL)
			goto fail;
	}

	/*
	 * allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/*
	 * allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (di->txdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: txdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->txdpa);
			goto fail;
		}
		if (di->rxdpa > SI_PCI_DMA_SZ) {
			brcms_dbg_dma(di->core,
				      "%s: rxdpa 0x%x: addrext not supported\n",
				      di->name, (u32)di->rxdpa);
			goto fail;
		}
	}

	/* Initialize AMPDU session */
	brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);

	brcms_dbg_dma(di->core,
		      "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
		      di->ddoffsetlow, di->ddoffsethigh,
		      di->dataoffsetlow, di->dataoffsethigh,
		      di->addrext);

	return (struct dma_pub *) di;

fail:
	dma_detach((struct dma_pub *)di);
	return NULL;
}

static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
	     dma_addr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow == 0) || !(pa & PCI32ADDR_HIGH)) {
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		ddring[outidx].addrlow = cpu_to_le32(pa + di->dataoffsetlow);
		ddring[outidx].addrhigh = cpu_to_le32(di->dataoffsethigh);
		ddring[outidx].ctrl1 = cpu_to_le32(*flags);
		ddring[outidx].ctrl2 = cpu_to_le32(ctrl2);
	}
	if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
		if (dma64_dd_parity(&ddring[outidx]))
			ddring[outidx].ctrl2 =
			     cpu_to_le32(ctrl2 | D64_CTRL2_PARITY);
	}
}
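
/*
 * Illustrative example (added for clarity, not part of the original source):
 * on a host with a nonzero dataoffsetlow, a buffer at pa == 0xC0001000
 * takes the address-extension path above: ae == 3 (pa's bits [31:30]),
 * pa is masked down to 0x00001000, and ae lands in ctrl2 bits [17:16]
 * (D64_CTRL2_AE), from which the hardware reassembles the full address.
 */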

/* !! may be called with core in reset */
void dma_detach(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	/* free dma descriptor rings */
	if (di->txd64)
		dma_free_coherent(di->dmadev, di->txdalloc,
				  ((s8 *)di->txd64 - di->txdalign),
				  (di->txdpaorig));
	if (di->rxd64)
		dma_free_coherent(di->dmadev, di->rxdalloc,
				  ((s8 *)di->rxd64 - di->rxdalign),
				  (di->rxdpaorig));

	/* free packet pointer vectors */
	kfree(di->txp);
	kfree(di->rxp);

	/* free our private info structure */
	kfree(di);
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = pa;
		else
			di->rcvptrbase = pa;
	}

	if ((di->ddoffsetlow == 0)
	    || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
		}
	} else {
		/* DMA64 32bits address extension */
		u32 ae;

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
				       D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
				     pa + di->ddoffsetlow);
			bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
				     di->ddoffsethigh);
			bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
				       D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}

static void _dma_rxenable(struct dma_info *di)
{
	uint dmactrlflags = di->dma.dmactrlflags;
	u32 control;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	control = D64_RC_RE | (bcma_read32(di->core,
					   DMA64RXREGOFFS(di, control)) &
			       D64_RC_AE);

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control),
		     ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}

void dma_rxinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset(di->rxd64, '\0', di->nrxd * sizeof(struct dma64desc));

	/* A DMA engine without an alignment requirement requires the table
	 * to be inited before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
	uint i, curr;
	struct sk_buff *rxp;
	dma_addr_t pa;

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((bcma_read32(di->core,
			      DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	di->rxp[i] = NULL;

	pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;

	/* clear this packet from the descriptor ring */
	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);

	di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
	di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

	di->rxin = nextrxd(di, i);

	return rxp;
}

static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}

/*
 * !! rx entry routine
 * Returns the number of packets in the next frame, or 0 if there are no more.
 * If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is supported
 * with a chain of packets; otherwise a multi-buffer frame is treated as a
 * giant packet and tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer's data. Once the buffer's maximum size is reached, the data
 * continues in the next DMA descriptor's buffer WITHOUT a DMA header.
 */
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff_head dma_frames;
	struct sk_buff *p, *next;
	uint len;
	uint pkt_len;
	int resid = 0;
	int pktcnt = 1;

	skb_queue_head_init(&dma_frames);
 next_frame:
	p = _dma_getnextrxp(di, false);
	if (p == NULL)
		return 0;

	len = le16_to_cpu(*(__le16 *) (p->data));
	brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
	dma_spin_for_len(len, p);

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(p, pkt_len);
	skb_queue_tail(&dma_frames, p);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			pkt_len = min_t(uint, resid, di->rxbufsize);
			__skb_trim(p, pkt_len);
			skb_queue_tail(&dma_frames, p);
			resid -= di->rxbufsize;
			pktcnt++;
		}

#ifdef DEBUG
		if (resid > 0) {
			uint cur;
			cur =
			    B2I(((bcma_read32(di->core,
					      DMA64RXREGOFFS(di, status0)) &
				  D64_RS0_CD_MASK) - di->rcvptrbase) &
				D64_RS0_CD_MASK, struct dma64desc);
			brcms_dbg_dma(di->core,
				      "rxin %d rxout %d, hw_curr %d\n",
				      di->rxin, di->rxout, cur);
		}
#endif				/* DEBUG */

		if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
				      di->name, len);
			skb_queue_walk_safe(&dma_frames, p, next) {
				skb_unlink(p, &dma_frames);
				brcmu_pkt_buf_free_skb(p);
			}
			di->dma.rxgiants++;
			pktcnt = 1;
			goto next_frame;
		}
	}

	skb_queue_splice_tail(&dma_frames, skb_list);
	return pktcnt;
}
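
/*
 * Illustrative example (added for clarity, not part of the original source):
 * with rxbufsize == 2048 and rxoffset == 30, a 5000-byte frame leaves
 * resid == 5000 - (2048 - 30) == 2982 after the first buffer, so the loop
 * above pulls two more descriptors (2048 + 934 bytes) and dma_rx() returns
 * pktcnt == 3 -- but only when DMA_CTRL_RXMULTI is set; otherwise the
 * chain is freed and counted in rxgiants.
 */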

static bool dma64_rxidle(struct dma_info *di)
{
	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->nrxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
		 D64_RS0_CD_MASK));
}

static bool dma64_txidle(struct dma_info *di)
{
	if (di->ntxd == 0)
		return true;

	return ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
		(bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
		 D64_XS0_CD_MASK));
}

/*
 * post receive buffers
 * Returns true if refill failed completely and the ring is empty; this will
 * stall the rx dma and the user might want to call rxfill again asap. This
 * is unlikely to happen on a memory-rich NIC, but often happens on a
 * memory-constrained dongle.
 */
bool dma_rxfill(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dma_addr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - nrxdactive(di, rxin, rxout);

	brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/*
		 * the di->rxbufsize doesn't include the extra headroom,
		 * we need to add it to the size to be allocated
		 */
		p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);

		if (p == NULL) {
			brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
				      di->name);
			if (i == 0 && dma64_rxidle(di)) {
				brcms_dbg_dma(di->core, "%s: ring is empty !\n",
					      di->name);
				ring_empty = true;
			}
			di->dma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
				    DMA_FROM_DEVICE);

		/* save the free packet pointer */
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = nextrxd(di, rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
		     di->rcvptrbase + I2B(rxout, struct dma64desc));

	return ring_empty;
}

void dma_rxreclaim(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	while ((p = _dma_getnextrxp(di, true)))
		brcmu_pkt_buf_free_skb(p);
}

void dma_counterreset(struct dma_pub *pub)
{
	/* reset all software counters */
	pub->rxgiants = 0;
	pub->rxnobuf = 0;
	pub->txnobuf = 0;
}

/* get the address of the var in order to change later */
unsigned long dma_getvar(struct dma_pub *pub, const char *name)
{
	struct dma_info *di = (struct dma_info *)pub;

	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->dma.txavail);
	return 0;
}

/* 64-bit DMA functions */

void dma_txinit(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 control = D64_XC_XE;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->dma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset(di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));

	/* A DMA engine without an alignment requirement requires the table
	 * to be inited before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);

	/* A DMA engine with an alignment requirement requires the table
	 * to be inited before enabling the engine
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

void dma_txsuspend(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}

void dma_txresume(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	brcms_dbg_dma(di->core, "%s:\n", di->name);

	if (di->ntxd == 0)
		return;

	bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}

bool dma_txsuspended(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	return (di->ntxd == 0) ||
	       ((bcma_read32(di->core,
			     DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
		D64_XC_SE);
}

void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct sk_buff *p;

	brcms_dbg_dma(di->core, "%s: %s\n",
		      di->name,
		      range == DMA_RANGE_ALL ? "all" :
		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		      "transferred");

	if (di->txin == di->txout)
		return;

	while ((p = dma_getnexttxp(pub, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
			brcmu_pkt_buf_free_skb(p);
	}
}

bool dma_txreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
		 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
		 10000);

	bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
		    D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}

bool dma_rxreset(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	u32 status;

	if (di->nrxd == 0)
		return true;

	bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
	SPINWAIT(((status =
		   (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
		    D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}

static void dma_txenq(struct dma_info *di, struct sk_buff *p)
{
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dma_addr_t pa;

	txout = di->txout;

	if (WARN_ON(nexttxd(di, txout) == di->txin))
		return;

	/*
	 * obtain and initialize transmit descriptor entry.
	 */
	data = p->data;
	len = p->len;

	/* get physical address of buffer start */
	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);

	/* With a DMA segment list, Descriptor table is filled
	 * using the segment list instead of looping over
	 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
	 * is when end of segment list is reached.
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);

	txout = nexttxd(di, txout);

	/* save the packet */
	di->txp[prevtxd(di, txout)] = p;

	/* bump the tx descriptor index */
	di->txout = txout;
}

static void ampdu_finalize(struct dma_info *di)
{
	struct brcms_ampdu_session *session = &di->ampdu_session;
	struct sk_buff *p;

	trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
				  session->max_ampdu_len,
				  session->max_ampdu_frames,
				  session->ampdu_len,
				  skb_queue_len(&session->skb_list),
				  session->dma_len);

	if (WARN_ON(skb_queue_empty(&session->skb_list)))
		return;

	brcms_c_ampdu_finalize(session);

	while (!skb_queue_empty(&session->skb_list)) {
		p = skb_dequeue(&session->skb_list);
		dma_txenq(di, p);
	}

	bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
		     di->xmtptrbase + I2B(di->txout, struct dma64desc));
	brcms_c_ampdu_reset_session(session, session->wlc);
}

static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
{
	struct brcms_ampdu_session *session = &di->ampdu_session;
	int ret;

	ret = brcms_c_ampdu_add_frame(session, p);
	if (ret == -ENOSPC) {
		/*
		 * The AMPDU cannot accommodate this frame. Close out the in-
		 * progress AMPDU session and start a new one.
		 */
		ampdu_finalize(di);
		ret = brcms_c_ampdu_add_frame(session, p);
	}

	WARN_ON(ret);
}

/* Update count of available tx descriptors based on current DMA state */
static void dma_update_txavail(struct dma_info *di)
{
	/*
	 * Available space is number of descriptors less the number of
	 * active descriptors and the number of queued AMPDU frames.
	 */
	di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
			  skb_queue_len(&di->ampdu_session.skb_list) - 1;
}
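
/*
 * Illustrative example (added for clarity, not part of the original source):
 * with ntxd == 256, txin == 10, txout == 250 and 3 frames still queued in
 * the AMPDU session, txavail == 256 - 240 - 3 - 1 == 12. The final "- 1"
 * keeps one descriptor unused so a full ring (nexttxd(di, txout) == txin,
 * the check in dma_txfast() below) stays distinguishable from an empty one.
 */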

/*
 * !! tx entry routine
 * WARNING: the caller must check the return value for errors.
 * An error (tossed frame) can be fatal and cause many subsequent
 * hard-to-debug problems.
 */
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct brcms_ampdu_session *session = &di->ampdu_session;
	struct ieee80211_tx_info *tx_info;
	bool is_ampdu;

	/* no use to transmit a zero length packet */
	if (p->len == 0)
		return 0;

	/* return nonzero if out of tx descriptors */
	if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
		goto outoftxd;

	tx_info = IEEE80211_SKB_CB(p);
	is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
	if (is_ampdu)
		prep_ampdu_frame(di, p);
	else
		dma_txenq(di, p);

	/* tx flow control */
	dma_update_txavail(di);

	/* kick the chip */
	if (is_ampdu) {
		/*
		 * Start sending data if we've got a full AMPDU, there's
		 * no more space in the DMA ring, or the ring isn't
		 * currently transmitting.
		 */
		if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
		    di->dma.txavail == 0 || dma64_txidle(di))
			ampdu_finalize(di);
	} else {
		bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
			     di->xmtptrbase + I2B(di->txout, struct dma64desc));
	}

	return 0;

 outoftxd:
	brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
	brcmu_pkt_buf_free_skb(p);
	di->dma.txavail = 0;
	di->dma.txnobuf++;
	return -ENOSPC;
}

void dma_txflush(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct brcms_ampdu_session *session = &di->ampdu_session;

	if (!skb_queue_empty(&session->skb_list))
		ampdu_finalize(di);
}

int dma_txpending(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	return ntxdactive(di, di->txin, di->txout);
}

/*
 * If we have an active AMPDU session and are not transmitting,
 * this function will force tx to start.
 */
void dma_kick_tx(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;
	struct brcms_ampdu_session *session = &di->ampdu_session;

	if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
		ampdu_finalize(di);
}

/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
{
	struct dma_info *di = (struct dma_info *)pub;
	u16 start, end, i;
	u16 active_desc;
	struct sk_buff *txp;

	brcms_dbg_dma(di->core, "%s: %s\n",
		      di->name,
		      range == DMA_RANGE_ALL ? "all" :
		      range == DMA_RANGE_TRANSMITTED ? "transmitted" :
		      "transferred");

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == DMA_RANGE_ALL)
		end = di->txout;
	else {
		end = (u16) (B2I(((bcma_read32(di->core,
					       DMA64TXREGOFFS(di, status0)) &
				   D64_XS0_CD_MASK) - di->xmtptrbase) &
				 D64_XS0_CD_MASK, struct dma64desc));

		if (range == DMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16)(bcma_read32(di->core,
					      DMA64TXREGOFFS(di, status1)) &
				  D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, struct dma64desc);
			if (end != active_desc)
				end = prevtxd(di, active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = nexttxd(di, i)) {
		dma_addr_t pa;
		uint size;

		pa = le32_to_cpu(di->txd64[i].addrlow) - di->dataoffsetlow;

		size =
		    (le32_to_cpu(di->txd64[i].ctrl2) &
		     D64_CTRL2_BC_MASK);

		di->txd64[i].addrlow = cpu_to_le32(0xdeadbeef);
		di->txd64[i].addrhigh = cpu_to_le32(0xdeadbeef);

		txp = di->txp[i];
		di->txp[i] = NULL;

		dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
	}

	di->txin = i;

	/* tx flow control */
	dma_update_txavail(di);

	return txp;

bogus:
	brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
		      start, end, di->txout);
	return NULL;
}

/*
 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
 * modified. The modified portion of the packet is not under control of the DMA
 * engine. This function calls a caller-supplied function for each packet in
 * the caller specified dma chain.
 */
void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
		      (void *pkt, void *arg_a), void *arg_a)
{
	struct dma_info *di = (struct dma_info *) dmah;
	uint i = di->txin;
	uint end = di->txout;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	while (i != end) {
		skb = di->txp[i];
		if (skb != NULL) {
			tx_info = (struct ieee80211_tx_info *)skb->cb;
			(callback_fnc)(tx_info, arg_a);
		}
		i = nexttxd(di, i);
	}
}