1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16 #include <linux/kernel.h>
17 #include <wlc_cfg.h>
18 #include <bcmdefs.h>
19 #include <osl.h>
20 #include <bcmutils.h>
21 #include <siutils.h>
22 #include <bcmendian.h>
23 #include <wlioctl.h>
24 #include <sbhndpio.h>
25 #include <sbhnddma.h>
26 #include <hnddma.h>
27 #include <d11.h>
28 #include <wlc_rate.h>
29 #include <wlc_pub.h>
30 #include <wlc_key.h>
31 #include <wlc_event.h>
32 #include <wlc_mac80211.h>
33 #include <wlc_phy_hal.h>
34 #include <wlc_antsel.h>
35 #include <wlc_scb.h>
36 #include <net/mac80211.h>
37 #include <wlc_ampdu.h>
38 #include <wl_export.h>
39 #include <wl_dbg.h>
40
41
42 #define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu */
43 #define AMPDU_NUM_MPDU_LEGACY 16 /* max number of mpdus in an ampdu to a legacy device */
44 #define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
45 #define AMPDU_TX_BA_DEF_WSIZE 64 /* default Tx ba window size (in pdu) */
46 #define AMPDU_RX_BA_DEF_WSIZE 64 /* default Rx ba window size (in pdu) */
47 #define AMPDU_RX_BA_MAX_WSIZE 64 /* max Rx ba window size (in pdu) */
48 #define AMPDU_MAX_DUR 5 /* max dur of tx ampdu (in msec) */
49 #define AMPDU_DEF_RETRY_LIMIT 5 /* default tx retry limit */
50 #define AMPDU_DEF_RR_RETRY_LIMIT 2 /* default tx retry limit at reg rate */
51 #define AMPDU_DEF_TXPKT_WEIGHT 2 /* default weight of ampdu in txfifo */
52 #define AMPDU_DEF_FFPLD_RSVD 2048 /* default ffpld reserved bytes */
53 #define AMPDU_INI_FREE 10 /* # of inis to be freed on detach */
54 #define AMPDU_SCB_MAX_RELEASE 20 /* max # of mpdus released at a time */
55
56 #define NUM_FFPLD_FIFO 4 /* number of fifos affected by pre-loading */
57 #define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu
58 * without underflows
59 */
60 #define FFPLD_MPDU_SIZE 1800 /* estimate of maximum mpdu size */
61 #define FFPLD_MAX_MCS 23 /* we don't deal with mcs 32 */
62 #define FFPLD_PLD_INCR 1000 /* increments in bytes */
63 #define FFPLD_MAX_AMPDU_CNT 5000 /* maximum number of ampdu we
64 * accumulate between resets.
65 */
66
67 #define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)
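/* The macro above maps an 802.11 sequence number onto a slot in the transmit
 * BA window; with AMPDU_TX_BA_MAX_WSIZE == 64, seq 0, 64, 128, ... all share
 * slot 0, so per-slot state such as ini->txretry[] only needs 64 entries.
 */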
68
69 /* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
70 #define AMPDU_MAX_MPDU_OVERHEAD (DOT11_FCS_LEN + DOT11_ICV_AES_LEN + AMPDU_DELIMITER_LEN + 3 \
71 + DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)
72
73 #ifdef BCMDBG
74 u32 wl_ampdu_dbg =
75 WL_AMPDU_UPDN_VAL |
76 WL_AMPDU_ERR_VAL |
77 WL_AMPDU_TX_VAL |
78 WL_AMPDU_RX_VAL |
79 WL_AMPDU_CTL_VAL |
80 WL_AMPDU_HW_VAL | WL_AMPDU_HWTXS_VAL | WL_AMPDU_HWDBG_VAL;
81 #endif
82
83 /* structure to hold tx fifo information and pre-loading state
84 * counters specific to tx underflows of ampdus
85 * some counters might be redundant with the ones in wlc or ampdu structures.
86 * This makes it possible to maintain a specific state independently of
87 * how often and/or when the wlc counters are updated.
88 */
89 typedef struct wlc_fifo_info {
90 u16 ampdu_pld_size; /* number of bytes to be pre-loaded */
91 u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1]; /* per-mcs max # of mpdus in an ampdu */
92 u16 prev_txfunfl; /* num of underflows last read from the HW macstats counter */
93 u32 accum_txfunfl; /* num of underflows since we modified pld params */
94 u32 accum_txampdu; /* num of tx ampdu since we modified pld params */
95 u32 prev_txampdu; /* previous reading of tx ampdu */
96 u32 dmaxferrate; /* estimated dma avg xfer rate in kbits/sec */
97 } wlc_fifo_info_t;
98
99 /* AMPDU module specific state */
100 struct ampdu_info {
101 wlc_info_t *wlc; /* pointer to main wlc structure */
102 int scb_handle; /* scb cubby handle to retrieve data from scb */
103 u8 ini_enable[AMPDU_MAX_SCB_TID]; /* per-tid initiator enable/disable of ampdu */
104 u8 ba_tx_wsize; /* Tx ba window size (in pdu) */
105 u8 ba_rx_wsize; /* Rx ba window size (in pdu) */
106 u8 retry_limit; /* mpdu transmit retry limit */
107 u8 rr_retry_limit; /* mpdu transmit retry limit at regular rate */
108 u8 retry_limit_tid[AMPDU_MAX_SCB_TID]; /* per-tid mpdu transmit retry limit */
109 /* per-tid mpdu transmit retry limit at regular rate */
110 u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
111 u8 mpdu_density; /* min mpdu spacing (0-7) ==> 2^(x-1)/8 usec */
112 s8 max_pdu; /* max pdus allowed in ampdu */
113 u8 dur; /* max duration of an ampdu (in msec) */
114 u8 txpkt_weight; /* weight of ampdu in txfifo; reduces rate lag */
115 u8 rx_factor; /* maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes */
116 u32 ffpld_rsvd; /* number of bytes to reserve for preload */
117 u32 max_txlen[MCS_TABLE_SIZE][2][2]; /* max size of ampdu per mcs, bw and sgi */
118 void *ini_free[AMPDU_INI_FREE]; /* array of ini's to be freed on detach */
119 bool mfbr; /* enable multiple fallback rate */
120 u32 tx_max_funl; /* underflows should be kept such that
121 * (tx_max_funl * underflows) < tx frames
122 */
123 wlc_fifo_info_t fifo_tb[NUM_FFPLD_FIFO]; /* table of fifo infos */
124
125 };
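/*
 * A note on two of the fields above, derived from how they are used below:
 * rx_factor follows the 802.11n maximum A-MPDU length exponent, so a value
 * of 3 advertises 2^(13+3) = 65536 bytes (AMPDU_RX_FACTOR_64K) and a value
 * of 2 gives 32768 bytes. max_txlen[mcs][bw][sgi] is indexed with bw = 0/1
 * for 20/40 MHz and sgi = 0/1 for long/short guard interval; see
 * ampdu_update_max_txlen().
 */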
126
127 #define AMPDU_CLEANUPFLAG_RX (0x1)
128 #define AMPDU_CLEANUPFLAG_TX (0x2)
129
130 #define SCB_AMPDU_CUBBY(ampdu, scb) (&(scb->scb_ampdu))
131 #define SCB_AMPDU_INI(scb_ampdu, tid) (&(scb_ampdu->ini[tid]))
132
133 static void wlc_ffpld_init(ampdu_info_t *ampdu);
134 static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int f);
135 static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f);
136
137 static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
138 scb_ampdu_t *scb_ampdu,
139 u8 tid, bool override);
140 static void ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu,
141 u8 tid, bool force);
142 static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur);
143 static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb);
144 static void scb_ampdu_update_config_all(ampdu_info_t *ampdu);
145
146 #define wlc_ampdu_txflowcontrol(a, b, c) do {} while (0)
147
148 static void wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb,
149 struct sk_buff *p, tx_status_t *txs,
150 u32 frmtxstatus, u32 frmtxstatus2);
151
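/* The driver's tx frames start with a d11txh_t descriptor followed by
 * D11_PHY_HDR_LEN bytes of PLCP and then the 802.11 header, so the sequence
 * number is recovered by skipping both and shifting out the fragment-number
 * bits.
 */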
152 static inline u16 pkt_txh_seqnum(wlc_info_t *wlc, struct sk_buff *p)
153 {
154 d11txh_t *txh;
155 struct dot11_header *h;
156 txh = (d11txh_t *) p->data;
157 h = (struct dot11_header *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
158 return ltoh16(h->seq) >> SEQNUM_SHIFT;
159 }
160
161 ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc)
162 {
163 ampdu_info_t *ampdu;
164 int i;
165
166 /* some code depends on packed structures */
167 ASSERT(DOT11_MAXNUMFRAGS == NBITS(u16));
168 ASSERT(ISPOWEROF2(AMPDU_TX_BA_MAX_WSIZE));
169 ASSERT(ISPOWEROF2(AMPDU_RX_BA_MAX_WSIZE));
170 ASSERT(wlc->pub->tunables->ampdunummpdu <= AMPDU_MAX_MPDU);
171 ASSERT(wlc->pub->tunables->ampdunummpdu > 0);
172
173 ampdu = kzalloc(sizeof(ampdu_info_t), GFP_ATOMIC);
174 if (!ampdu) {
175 WL_ERROR(("wl%d: wlc_ampdu_attach: out of mem\n", wlc->pub->unit));
176 return NULL;
177 }
178 ampdu->wlc = wlc;
179
180 for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
181 ampdu->ini_enable[i] = true;
182 /* Disable ampdu for VO by default */
183 ampdu->ini_enable[PRIO_8021D_VO] = false;
184 ampdu->ini_enable[PRIO_8021D_NC] = false;
185
186 /* Disable ampdu for BK by default since not enough fifo space */
187 ampdu->ini_enable[PRIO_8021D_NONE] = false;
188 ampdu->ini_enable[PRIO_8021D_BK] = false;
189
190 ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
191 ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
192 ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
193 ampdu->max_pdu = AUTO;
194 ampdu->dur = AMPDU_MAX_DUR;
195 ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
196
197 ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
198 /* bump max ampdu rcv size to 64k for all 11n devices except 4321A0 and 4321A1 */
199 if (WLCISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
200 ampdu->rx_factor = AMPDU_RX_FACTOR_32K;
201 else
202 ampdu->rx_factor = AMPDU_RX_FACTOR_64K;
203 ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
204 ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;
205
206 for (i = 0; i < AMPDU_MAX_SCB_TID; i++) {
207 ampdu->retry_limit_tid[i] = ampdu->retry_limit;
208 ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
209 }
210
211 ampdu_update_max_txlen(ampdu, ampdu->dur);
212 ampdu->mfbr = false;
213 /* try to set ampdu to the default value */
214 wlc_ampdu_set(ampdu, wlc->pub->_ampdu);
215
216 ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
217 wlc_ffpld_init(ampdu);
218
219 return ampdu;
220 }
221
222 void wlc_ampdu_detach(ampdu_info_t *ampdu)
223 {
224 int i;
225
226 if (!ampdu)
227 return;
228
229 /* free all ini's that were queued for freeing by callbacks that never ran */
230 for (i = 0; i < AMPDU_INI_FREE; i++) {
231 if (ampdu->ini_free[i]) {
232 kfree(ampdu->ini_free[i]);
233 }
234 }
235
236 wlc_module_unregister(ampdu->wlc->pub, "ampdu", ampdu);
237 kfree(ampdu);
238 }
239
240 void scb_ampdu_cleanup(ampdu_info_t *ampdu, struct scb *scb)
241 {
242 scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
243 u8 tid;
244
245 WL_AMPDU_UPDN(("scb_ampdu_cleanup: enter\n"));
246 ASSERT(scb_ampdu);
247
248 for (tid = 0; tid < AMPDU_MAX_SCB_TID; tid++) {
249 ampdu_cleanup_tid_ini(ampdu, scb_ampdu, tid, false);
250 }
251 }
252
253 /* reset the ampdu state machine so that it can gracefully handle packets that were
254 * freed from the dma and tx queues during reinit
255 */
256 void wlc_ampdu_reset(ampdu_info_t *ampdu)
257 {
258 WL_NONE(("%s: Entering\n", __func__));
259 }
260
261 static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb)
262 {
263 scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
264 int i;
265
266 scb_ampdu->max_pdu = (u8) ampdu->wlc->pub->tunables->ampdunummpdu;
267
268 /* go back to legacy size if some preloading is occurring */
269 for (i = 0; i < NUM_FFPLD_FIFO; i++) {
270 if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
271 scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
272 }
273
274 /* apply user override */
275 if (ampdu->max_pdu != AUTO)
276 scb_ampdu->max_pdu = (u8) ampdu->max_pdu;
277
278 scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu, AMPDU_SCB_MAX_RELEASE);
279
280 if (scb_ampdu->max_rxlen)
281 scb_ampdu->release =
282 min_t(u8, scb_ampdu->release, scb_ampdu->max_rxlen / 1600);
283
284 scb_ampdu->release = min(scb_ampdu->release,
285 ampdu->fifo_tb[TX_AC_BE_FIFO].
286 mcs2ampdu_table[FFPLD_MAX_MCS]);
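
/* release is therefore bounded by the configured max_pdu, the global
 * AMPDU_SCB_MAX_RELEASE (20), the peer's advertised max_rxlen assuming
 * roughly 1600-byte mpdus, and the preload-derived per-mcs limit for the
 * BE fifo. For example (illustrative numbers), a 65535-byte max_rxlen
 * allows 65535 / 1600 = 40 mpdus, so it does not clamp below 20.
 */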
287
288 ASSERT(scb_ampdu->release);
289 }
290
291 void scb_ampdu_update_config_all(ampdu_info_t *ampdu)
292 {
293 scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
294 }
295
296 static void wlc_ffpld_init(ampdu_info_t *ampdu)
297 {
298 int i, j;
299 wlc_fifo_info_t *fifo;
300
301 for (j = 0; j < NUM_FFPLD_FIFO; j++) {
302 fifo = (ampdu->fifo_tb + j);
303 fifo->ampdu_pld_size = 0;
304 for (i = 0; i <= FFPLD_MAX_MCS; i++)
305 fifo->mcs2ampdu_table[i] = 255;
306 fifo->dmaxferrate = 0;
307 fifo->accum_txampdu = 0;
308 fifo->prev_txfunfl = 0;
309 fifo->accum_txfunfl = 0;
310
311 }
312 }
313
314 /* evaluate the dma transfer rate using the tx underflows as feedback.
315 * If necessary, increase tx fifo preloading. If not enough,
316 * decrease maximum ampdu size for each mcs till underflows stop
317 * Return 1 if pre-loading not active, -1 if not an underflow event,
318 * 0 if pre-loading module took care of the event.
319 */
320 static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
321 {
322 ampdu_info_t *ampdu = wlc->ampdu;
323 u32 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
324 u32 txunfl_ratio;
325 u8 max_mpdu;
326 u32 current_ampdu_cnt = 0;
327 u16 max_pld_size;
328 u32 new_txunfl;
329 wlc_fifo_info_t *fifo = (ampdu->fifo_tb + fid);
330 uint xmtfifo_sz;
331 u16 cur_txunfl;
332
333 /* return if we got here for a different reason than underflows */
334 cur_txunfl =
335 wlc_read_shm(wlc,
336 M_UCODE_MACSTAT + offsetof(macstat_t, txfunfl[fid]));
337 new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
338 if (new_txunfl == 0) {
339 WL_FFPLD(("check_txunfl : TX status FRAG set but no tx underflows\n"));
340 return -1;
341 }
342 fifo->prev_txfunfl = cur_txunfl;
343
344 if (!ampdu->tx_max_funl)
345 return 1;
346
347 /* check if fifo is big enough */
348 if (wlc_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz)) {
349 WL_FFPLD(("check_txunfl : get xmtfifo_sz failed.\n"));
350 return -1;
351 }
352
353 if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
354 return 1;
355
356 max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
357 fifo->accum_txfunfl += new_txunfl;
358
359 /* we need to wait for at least 10 underflows */
360 if (fifo->accum_txfunfl < 10)
361 return 0;
362
363 WL_FFPLD(("ampdu_count %d tx_underflows %d\n",
364 current_ampdu_cnt, fifo->accum_txfunfl));
365
366 /*
367 compute the current ratio of tx unfl per ampdu.
368 When the current ampdu count becomes too
369 big while the ratio remains small, we reset
370 the current count in order to not
371 introduce too big of a latency in detecting a
372 large amount of tx underflows later.
373 */
374
375 txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;
376
377 if (txunfl_ratio > ampdu->tx_max_funl) {
378 if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT) {
379 fifo->accum_txfunfl = 0;
380 }
381 return 0;
382 }
383 max_mpdu =
384 min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
385
386 /* If the largest possible ampdu (max_mpdu mpdus of FFPLD_MPDU_SIZE bytes)
387 is already covered by the preload, there is nothing more we can do.
388 */
389
390 if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
391 WL_FFPLD(("tx fifo pld : max ampdu fits in fifo\n)"));
392 fifo->accum_txfunfl = 0;
393 return 0;
394 }
395
396 if (fifo->ampdu_pld_size < max_pld_size) {
397
398 /* increment by FFPLD_PLD_INCR bytes */
399 fifo->ampdu_pld_size += FFPLD_PLD_INCR;
400 if (fifo->ampdu_pld_size > max_pld_size)
401 fifo->ampdu_pld_size = max_pld_size;
402
403 /* update scb release size */
404 scb_ampdu_update_config_all(ampdu);
405
406 /*
407 compute a new dma xfer rate for max_mpdu @ max mcs.
408 This is the minimum dma rate that
409 can achieve a no-underflow condition for the current mpdu size.
410 */
411 /* note : we divide/multiply by 100 to avoid integer overflows */
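/* Sending max_mpdu mpdus of FFPLD_MPDU_SIZE bytes at phy_rate takes
 * (max_mpdu * FFPLD_MPDU_SIZE) / phy_rate of airtime; the dma only has to
 * supply what was not preloaded, i.e. that total minus ampdu_pld_size,
 * which gives the minimum no-underflow rate computed below.
 */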
412 fifo->dmaxferrate =
413 (((phy_rate / 100) *
414 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
415 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
416
417 WL_FFPLD(("DMA estimated transfer rate %d; pre-load size %d\n",
418 fifo->dmaxferrate, fifo->ampdu_pld_size));
419 } else {
420
421 /* decrease ampdu size */
422 if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
423 if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
424 fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
425 AMPDU_NUM_MPDU_LEGACY - 1;
426 else
427 fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;
428
429 /* recompute the table */
430 wlc_ffpld_calc_mcs2ampdu_table(ampdu, fid);
431
432 /* update scb release size */
433 scb_ampdu_update_config_all(ampdu);
434 }
435 }
436 fifo->accum_txfunfl = 0;
437 return 0;
438 }
439
440 static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f)
441 {
442 int i;
443 u32 phy_rate, dma_rate, tmp;
444 u8 max_mpdu;
445 wlc_fifo_info_t *fifo = (ampdu->fifo_tb + f);
446
447 /* recompute the dma rate */
448 /* note : we divide/multiply by 100 to avoid integer overflows */
449 max_mpdu =
450 min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
451 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
452 dma_rate =
453 (((phy_rate / 100) *
454 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
455 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
456 fifo->dmaxferrate = dma_rate;
457
458 /* fill up the mcs2ampdu table; do not recalc the last mcs */
459 dma_rate = dma_rate >> 7;
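/* For an mcs whose phy_rate exceeds the estimated dma rate, the fifo drains
 * at (phy_rate - dma_rate), so the preloaded ampdu_pld_size bytes sustain
 * roughly ampdu_pld_size * phy_rate / (phy_rate - dma_rate) bytes of
 * airtime, i.e. that amount divided by FFPLD_MPDU_SIZE mpdus (plus one for
 * rounding), which is what the loop below stores, capped at 255.
 */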
460 for (i = 0; i < FFPLD_MAX_MCS; i++) {
461 /* shifting to keep it within integer range */
462 phy_rate = MCS_RATE(i, true, false) >> 7;
463 if (phy_rate > dma_rate) {
464 tmp = ((fifo->ampdu_pld_size * phy_rate) /
465 ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
466 tmp = min_t(u32, tmp, 255);
467 fifo->mcs2ampdu_table[i] = (u8) tmp;
468 }
469 }
470 }
471
472 static void BCMFASTPATH
473 wlc_ampdu_agg(ampdu_info_t *ampdu, struct scb *scb, struct sk_buff *p,
474 uint prec)
475 {
476 scb_ampdu_t *scb_ampdu;
477 scb_ampdu_tid_ini_t *ini;
478 u8 tid = (u8) (p->priority);
479
480 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
481
482 /* initialize initiator on first packet; sends addba req */
483 ini = SCB_AMPDU_INI(scb_ampdu, tid);
484 if (ini->magic != INI_MAGIC) {
485 ini = wlc_ampdu_init_tid_ini(ampdu, scb_ampdu, tid, false);
486 }
487 return;
488 }
489
490 int BCMFASTPATH
491 wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, struct sk_buff **pdu,
492 int prec)
493 {
494 wlc_info_t *wlc;
495 struct osl_info *osh;
496 struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
497 u8 tid, ndelim;
498 int err = 0;
499 u8 preamble_type = WLC_GF_PREAMBLE;
500 u8 fbr_preamble_type = WLC_GF_PREAMBLE;
501 u8 rts_preamble_type = WLC_LONG_PREAMBLE;
502 u8 rts_fbr_preamble_type = WLC_LONG_PREAMBLE;
503
504 bool rr = true, fbr = false;
505 uint i, count = 0, fifo, seg_cnt = 0;
506 u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0;
507 u32 ampdu_len, maxlen = 0;
508 d11txh_t *txh = NULL;
509 u8 *plcp;
510 struct dot11_header *h;
511 struct scb *scb;
512 scb_ampdu_t *scb_ampdu;
513 scb_ampdu_tid_ini_t *ini;
514 u8 mcs = 0;
515 bool use_rts = false, use_cts = false;
516 ratespec_t rspec = 0, rspec_fallback = 0;
517 ratespec_t rts_rspec = 0, rts_rspec_fallback = 0;
518 u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
519 struct dot11_rts_frame *rts;
520 u8 rr_retry_limit;
521 wlc_fifo_info_t *f;
522 bool fbr_iscck;
523 struct ieee80211_tx_info *tx_info;
524 u16 qlen;
525
526 wlc = ampdu->wlc;
527 osh = wlc->osh;
528 p = *pdu;
529
530 ASSERT(p);
531
532 tid = (u8) (p->priority);
533 ASSERT(tid < AMPDU_MAX_SCB_TID);
534
535 f = ampdu->fifo_tb + prio2fifo[tid];
536
537 scb = wlc->pub->global_scb;
538 ASSERT(scb->magic == SCB_MAGIC);
539
540 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
541 ASSERT(scb_ampdu);
542 ini = &scb_ampdu->ini[tid];
543
544 /* Let pressure continue to build ... */
545 qlen = pktq_plen(&qi->q, prec);
546 if (ini->tx_in_transit > 0 && qlen < scb_ampdu->max_pdu) {
547 return BCME_BUSY;
548 }
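/* The check above lets queue pressure build: while earlier mpdus for this
 * tid are still in transit and fewer than max_pdu frames are queued, defer
 * so a fuller aggregate can be formed on the next call; with nothing in
 * transit we send right away rather than stall the queue.
 */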
549
550 wlc_ampdu_agg(ampdu, scb, p, tid);
551
552 if (wlc->block_datafifo) {
553 WL_ERROR(("%s: Fifo blocked\n", __func__));
554 return BCME_BUSY;
555 }
556 rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
557 ampdu_len = 0;
558 dma_len = 0;
559 while (p) {
560 struct ieee80211_tx_rate *txrate;
561
562 tx_info = IEEE80211_SKB_CB(p);
563 txrate = tx_info->status.rates;
564
565 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
566 err = wlc_prep_pdu(wlc, p, &fifo);
567 } else {
568 WL_ERROR(("%s: AMPDU flag is off!\n", __func__));
569 *pdu = NULL;
570 err = 0;
571 break;
572 }
573
574 if (err) {
575 if (err == BCME_BUSY) {
576 WL_ERROR(("wl%d: wlc_sendampdu: prep_xdu retry; seq 0x%x\n", wlc->pub->unit, seq));
577 WLCNTINCR(ampdu->cnt->sduretry);
578 *pdu = p;
579 break;
580 }
581
582 /* error in the packet; reject it */
583 WL_AMPDU_ERR(("wl%d: wlc_sendampdu: prep_xdu rejected; seq 0x%x\n", wlc->pub->unit, seq));
584 WLCNTINCR(ampdu->cnt->sdurejected);
585
586 *pdu = NULL;
587 break;
588 }
589
590 /* pkt is good to be aggregated */
591 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
592 txh = (d11txh_t *) p->data;
593 plcp = (u8 *) (txh + 1);
594 h = (struct dot11_header *)(plcp + D11_PHY_HDR_LEN);
595 seq = ltoh16(h->seq) >> SEQNUM_SHIFT;
596 index = TX_SEQ_TO_INDEX(seq);
597
598 /* check mcl fields and test whether it can be agg'd */
599 mcl = ltoh16(txh->MacTxControlLow);
600 mcl &= ~TXC_AMPDU_MASK;
601 fbr_iscck = !(ltoh16(txh->XtraFrameTypes) & 0x3);
602 ASSERT(!fbr_iscck);
603 txh->PreloadSize = 0; /* always default to 0 */
604
605 /* Handle retry limits */
606 if (txrate[0].count <= rr_retry_limit) {
607 txrate[0].count++;
608 rr = true;
609 fbr = false;
610 ASSERT(!fbr);
611 } else {
612 fbr = true;
613 rr = false;
614 txrate[1].count++;
615 }
616
617 /* extract the length info */
618 len = fbr_iscck ? WLC_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
619 : WLC_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
620
621 /* retrieve null delimiter count */
622 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
623 seg_cnt += 1;
624
625 WL_AMPDU_TX(("wl%d: wlc_sendampdu: mpdu %d plcp_len %d\n",
626 wlc->pub->unit, count, len));
627
628 /*
629 * aggregateable mpdu. For ucode/hw agg,
630 * test whether need to break or change the epoch
631 */
632 if (count == 0) {
633 u16 fc;
634 mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
635 /* refill the bits since might be a retx mpdu */
636 mcl |= TXC_STARTMSDU;
637 rts = (struct dot11_rts_frame *)&txh->rts_frame;
638 fc = ltoh16(rts->fc);
639 if ((fc & FC_KIND_MASK) == FC_RTS) {
640 mcl |= TXC_SENDRTS;
641 use_rts = true;
642 }
643 if ((fc & FC_KIND_MASK) == FC_CTS) {
644 mcl |= TXC_SENDCTS;
645 use_cts = true;
646 }
647 } else {
648 mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
649 mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
650 }
651
652 len = roundup(len, 4);
653 ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
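/* Each subframe contributes its PLCP length padded to a 4-byte boundary
 * plus one MPDU delimiter and ndelim null delimiters (minimum-density
 * padding); the padding and trailing null delimiters of the final mpdu are
 * removed again once the aggregate is closed out below.
 */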
654
655 dma_len += (u16) pkttotlen(osh, p);
656
657 WL_AMPDU_TX(("wl%d: wlc_sendampdu: ampdu_len %d seg_cnt %d null delim %d\n", wlc->pub->unit, ampdu_len, seg_cnt, ndelim));
658
659 txh->MacTxControlLow = htol16(mcl);
660
661 /* this packet is added */
662 pkt[count++] = p;
663
664 /* patch the first MPDU */
665 if (count == 1) {
666 u8 plcp0, plcp3, is40, sgi;
667 struct ieee80211_sta *sta;
668
669 sta = tx_info->control.sta;
670
671 if (rr) {
672 plcp0 = plcp[0];
673 plcp3 = plcp[3];
674 } else {
675 plcp0 = txh->FragPLCPFallback[0];
676 plcp3 = txh->FragPLCPFallback[3];
677
678 }
679 is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
680 sgi = PLCP3_ISSGI(plcp3) ? 1 : 0;
681 mcs = plcp0 & ~MIMO_PLCP_40MHZ;
682 ASSERT(mcs < MCS_TABLE_SIZE);
683 maxlen =
684 min(scb_ampdu->max_rxlen,
685 ampdu->max_txlen[mcs][is40][sgi]);
686
687 WL_NONE(("sendampdu: sgi %d, is40 %d, mcs %d\n", sgi,
688 is40, mcs));
689
690 maxlen = 64 * 1024; /* XXX Fix me to honor real max_rxlen */
691
692 if (is40)
693 mimo_ctlchbw =
694 CHSPEC_SB_UPPER(WLC_BAND_PI_RADIO_CHANSPEC)
695 ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
696
697 /* rebuild the rspec and rspec_fallback */
698 rspec = RSPEC_MIMORATE;
699 rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
700 if (plcp[0] & MIMO_PLCP_40MHZ)
701 rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
702
703 if (fbr_iscck) /* CCK */
704 rspec_fallback =
705 CCK_RSPEC(CCK_PHY2MAC_RATE
706 (txh->FragPLCPFallback[0]));
707 else { /* MIMO */
708 rspec_fallback = RSPEC_MIMORATE;
709 rspec_fallback |=
710 txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
711 if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
712 rspec_fallback |=
713 (PHY_TXC1_BW_40MHZ <<
714 RSPEC_BW_SHIFT);
715 }
716
717 if (use_rts || use_cts) {
718 rts_rspec =
719 wlc_rspec_to_rts_rspec(wlc, rspec, false,
720 mimo_ctlchbw);
721 rts_rspec_fallback =
722 wlc_rspec_to_rts_rspec(wlc, rspec_fallback,
723 false, mimo_ctlchbw);
724 }
725 }
726
727 /* if (first mpdu for host agg) */
728 /* test whether to add more */
729 if ((MCS_RATE(mcs, true, false) >= f->dmaxferrate) &&
730 (count == f->mcs2ampdu_table[mcs])) {
731 WL_AMPDU_ERR(("wl%d: PR 37644: stopping ampdu at %d for mcs %d", wlc->pub->unit, count, mcs));
732 break;
733 }
734
735 if (count == scb_ampdu->max_pdu) {
736 WL_NONE(("Stop taking from q, reached %d deep\n",
737 scb_ampdu->max_pdu));
738 break;
739 }
740
741 /* check to see if the next pkt is a candidate for aggregation */
742 p = pktq_ppeek(&qi->q, prec);
743 
744 if (p) {
745 tx_info = IEEE80211_SKB_CB(p); /* tx_info must be checked with current p */
746 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
747 ((u8) (p->priority) == tid)) {
748
749 plen =
750 pkttotlen(osh, p) + AMPDU_MAX_MPDU_OVERHEAD;
751 plen = max(scb_ampdu->min_len, plen);
752
753 if ((plen + ampdu_len) > maxlen) {
754 p = NULL;
755 WL_ERROR(("%s: Bogus plen #1\n",
756 __func__));
757 ASSERT(3 == 4);
758 continue;
759 }
760
761 /* check if there are enough descriptors available */
762 if (TXAVAIL(wlc, fifo) <= (seg_cnt + 1)) {
763 WL_ERROR(("%s: No fifo space !!!!!!\n", __func__));
764 p = NULL;
765 continue;
766 }
767 p = pktq_pdeq(&qi->q, prec);
768 ASSERT(p);
769 } else {
770 p = NULL;
771 }
772 }
773 } /* end while(p) */
774
775 ini->tx_in_transit += count;
776
777 if (count) {
778 WLCNTADD(ampdu->cnt->txmpdu, count);
779
780 /* patch up the last txh */
781 txh = (d11txh_t *) pkt[count - 1]->data;
782 mcl = ltoh16(txh->MacTxControlLow);
783 mcl &= ~TXC_AMPDU_MASK;
784 mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
785 txh->MacTxControlLow = htol16(mcl);
786
787 /* remove the null delimiter after last mpdu */
788 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
789 txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
790 ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
791
792 /* remove the pad len from last mpdu */
793 fbr_iscck = ((ltoh16(txh->XtraFrameTypes) & 0x3) == 0);
794 len = fbr_iscck ? WLC_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
795 : WLC_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
796 ampdu_len -= roundup(len, 4) - len;
797
798 /* patch up the first txh & plcp */
799 txh = (d11txh_t *) pkt[0]->data;
800 plcp = (u8 *) (txh + 1);
801
802 WLC_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
803 /* mark plcp to indicate ampdu */
804 WLC_SET_MIMO_PLCP_AMPDU(plcp);
805
806 /* reset the mixed mode header durations */
807 if (txh->MModeLen) {
808 u16 mmodelen =
809 wlc_calc_lsig_len(wlc, rspec, ampdu_len);
810 txh->MModeLen = htol16(mmodelen);
811 preamble_type = WLC_MM_PREAMBLE;
812 }
813 if (txh->MModeFbrLen) {
814 u16 mmfbrlen =
815 wlc_calc_lsig_len(wlc, rspec_fallback, ampdu_len);
816 txh->MModeFbrLen = htol16(mmfbrlen);
817 fbr_preamble_type = WLC_MM_PREAMBLE;
818 }
819
820 /* set the preload length */
821 if (MCS_RATE(mcs, true, false) >= f->dmaxferrate) {
822 dma_len = min(dma_len, f->ampdu_pld_size);
823 txh->PreloadSize = htol16(dma_len);
824 } else
825 txh->PreloadSize = 0;
826
827 mch = ltoh16(txh->MacTxControlHigh);
828
829 /* update RTS dur fields */
830 if (use_rts || use_cts) {
831 u16 durid;
832 rts = (struct dot11_rts_frame *)&txh->rts_frame;
833 if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
834 TXC_PREAMBLE_RTS_MAIN_SHORT)
835 rts_preamble_type = WLC_SHORT_PREAMBLE;
836
837 if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
838 TXC_PREAMBLE_RTS_FB_SHORT)
839 rts_fbr_preamble_type = WLC_SHORT_PREAMBLE;
840
841 durid =
842 wlc_compute_rtscts_dur(wlc, use_cts, rts_rspec,
843 rspec, rts_preamble_type,
844 preamble_type, ampdu_len,
845 true);
846 rts->durid = htol16(durid);
847 durid = wlc_compute_rtscts_dur(wlc, use_cts,
848 rts_rspec_fallback,
849 rspec_fallback,
850 rts_fbr_preamble_type,
851 fbr_preamble_type,
852 ampdu_len, true);
853 txh->RTSDurFallback = htol16(durid);
854 /* set TxFesTimeNormal */
855 txh->TxFesTimeNormal = rts->durid;
856 /* set fallback rate version of TxFesTimeNormal */
857 txh->TxFesTimeFallback = txh->RTSDurFallback;
858 }
859
860 /* set flag and plcp for fallback rate */
861 if (fbr) {
862 WLCNTADD(ampdu->cnt->txfbr_mpdu, count);
863 WLCNTINCR(ampdu->cnt->txfbr_ampdu);
864 mch |= TXC_AMPDU_FBR;
865 txh->MacTxControlHigh = htol16(mch);
866 WLC_SET_MIMO_PLCP_AMPDU(plcp);
867 WLC_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
868 }
869
870 WL_AMPDU_TX(("wl%d: wlc_sendampdu: count %d ampdu_len %d\n",
871 wlc->pub->unit, count, ampdu_len));
872
873 /* inform rate_sel if this is a rate probe pkt */
874 frameid = ltoh16(txh->TxFrameID);
875 if (frameid & TXFID_RATE_PROBE_MASK) {
876 WL_ERROR(("%s: XXX what to do with TXFID_RATE_PROBE_MASK!?\n", __func__));
877 }
878 for (i = 0; i < count; i++)
879 wlc_txfifo(wlc, fifo, pkt[i], i == (count - 1),
880 ampdu->txpkt_weight);
881
882 }
883 /* endif (count) */
884 return err;
885 }
886
887 void BCMFASTPATH
888 wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, struct sk_buff *p,
889 tx_status_t *txs)
890 {
891 scb_ampdu_t *scb_ampdu;
892 wlc_info_t *wlc = ampdu->wlc;
893 scb_ampdu_tid_ini_t *ini;
894 u32 s1 = 0, s2 = 0;
895 struct ieee80211_tx_info *tx_info;
896
897 tx_info = IEEE80211_SKB_CB(p);
898 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
899 ASSERT(scb);
900 ASSERT(scb->magic == SCB_MAGIC);
901 ASSERT(txs->status & TX_STATUS_AMPDU);
902 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
903 ASSERT(scb_ampdu);
904 ini = SCB_AMPDU_INI(scb_ampdu, p->priority);
905 ASSERT(ini->scb == scb);
906
907 /* BMAC_NOTE: For the split driver, the second level txstatus comes later.
908 * So if the ACK was received, wait for the second level; else just
909 * call the first one
910 */
911 if (txs->status & TX_STATUS_ACK_RCV) {
912 u8 status_delay = 0;
913
914 /* wait until the next 8 bytes of txstatus are available */
915 while (((s1 =
916 R_REG(wlc->osh,
917 &wlc->regs->frmtxstatus)) & TXS_V) == 0) {
918 udelay(1);
919 status_delay++;
920 if (status_delay > 10) {
921 ASSERT(status_delay <= 10);
922 return;
923 }
924 }
925
926 ASSERT(!(s1 & TX_STATUS_INTERMEDIATE));
927 ASSERT(s1 & TX_STATUS_AMPDU);
928 s2 = R_REG(wlc->osh, &wlc->regs->frmtxstatus2);
929 }
930
931 wlc_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
932 wlc_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
933 }
934
935 void rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
936 tx_status_t *txs, u8 mcs);
937
938 void
939 rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
940 tx_status_t *txs, u8 mcs)
941 {
942 struct ieee80211_tx_rate *txrate = tx_info->status.rates;
943 int i;
944
945 /* clear the rest of the rates */
946 for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
947 txrate[i].idx = -1;
948 txrate[i].count = 0;
949 }
950 }
951
952 #define SHORTNAME "AMPDU status"
953
954 static void BCMFASTPATH
955 wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb,
956 struct sk_buff *p, tx_status_t *txs,
957 u32 s1, u32 s2)
958 {
959 scb_ampdu_t *scb_ampdu;
960 wlc_info_t *wlc = ampdu->wlc;
961 scb_ampdu_tid_ini_t *ini;
962 u8 bitmap[8], queue, tid;
963 d11txh_t *txh;
964 u8 *plcp;
965 struct dot11_header *h;
966 u16 seq, start_seq = 0, bindex, index, mcl;
967 u8 mcs = 0;
968 bool ba_recd = false, ack_recd = false;
969 u8 suc_mpdu = 0, tot_mpdu = 0;
970 uint supr_status;
971 bool update_rate = true, retry = true, tx_error = false;
972 u16 mimoantsel = 0;
973 u8 antselid = 0;
974 u8 retry_limit, rr_retry_limit;
975 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
976
977 #ifdef BCMDBG
978 u8 hole[AMPDU_MAX_MPDU];
979 memset(hole, 0, sizeof(hole));
980 #endif
981
982 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
983 ASSERT(txs->status & TX_STATUS_AMPDU);
984
985 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
986 ASSERT(scb_ampdu);
987
988 tid = (u8) (p->priority);
989
990 ini = SCB_AMPDU_INI(scb_ampdu, tid);
991 retry_limit = ampdu->retry_limit_tid[tid];
992 rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
993
994 ASSERT(ini->scb == scb);
995
996 memset(bitmap, 0, sizeof(bitmap));
997 queue = txs->frameid & TXFID_QUEUE_MASK;
998 ASSERT(queue < AC_COUNT);
999
1000 supr_status = txs->status & TX_STATUS_SUPR_MASK;
1001
1002 if (txs->status & TX_STATUS_ACK_RCV) {
1003 if (TX_STATUS_SUPR_UF == supr_status) {
1004 update_rate = false;
1005 }
1006
1007 ASSERT(txs->status & TX_STATUS_INTERMEDIATE);
1008 start_seq = txs->sequence >> SEQNUM_SHIFT;
1009 bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
1010 TX_STATUS_BA_BMAP03_SHIFT;
1011
1012 ASSERT(!(s1 & TX_STATUS_INTERMEDIATE));
1013 ASSERT(s1 & TX_STATUS_AMPDU);
1014
1015 bitmap[0] |=
1016 (s1 & TX_STATUS_BA_BMAP47_MASK) <<
1017 TX_STATUS_BA_BMAP47_SHIFT;
1018 bitmap[1] = (s1 >> 8) & 0xff;
1019 bitmap[2] = (s1 >> 16) & 0xff;
1020 bitmap[3] = (s1 >> 24) & 0xff;
1021
1022 bitmap[4] = s2 & 0xff;
1023 bitmap[5] = (s2 >> 8) & 0xff;
1024 bitmap[6] = (s2 >> 16) & 0xff;
1025 bitmap[7] = (s2 >> 24) & 0xff;
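
/* bitmap[] now holds the 64-bit block ack bitmap for this aggregate:
 * bits 0-3 come from the first tx status word and the remainder from the
 * second status pair (s1/s2) read from frmtxstatus/frmtxstatus2. Each
 * transmitted seq is tested against it below via
 * MODSUB_POW2(seq, start_seq, SEQNUM_MAX) and isset().
 */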
1026
1027 ba_recd = true;
1028 } else {
1029 WLCNTINCR(ampdu->cnt->noba);
1030 if (supr_status) {
1031 update_rate = false;
1032 if (supr_status == TX_STATUS_SUPR_BADCH) {
1033 WL_ERROR(("%s: Pkt tx suppressed, illegal channel possibly %d\n", __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)));
1034 } else {
1035 if (supr_status == TX_STATUS_SUPR_FRAG)
1036 WL_NONE(("%s: AMPDU frag err\n",
1037 __func__));
1038 else
1039 WL_ERROR(("%s: wlc_ampdu_dotxstatus: supr_status 0x%x\n", __func__, supr_status));
1040 }
1041 /* no need to retry for badch or exptime; will fail again */
1042 if (supr_status == TX_STATUS_SUPR_BADCH) {
1043 retry = false;
1044 WLCNTINCR(wlc->pub->_cnt->txchanrej);
1045 } else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
1046 retry = false;
1047 
1048 WLCNTINCR(wlc->pub->_cnt->txexptime);
1049
1050 /* TX underflow : try tuning pre-loading or ampdu size */
1051 } else if (supr_status == TX_STATUS_SUPR_FRAG) {
1052 /* if there were underflows, but pre-loading is not active,
1053 notify rate adaptation.
1054 */
1055 if (wlc_ffpld_check_txfunfl(wlc, prio2fifo[tid])
1056 > 0) {
1057 tx_error = true;
1058 }
1059 }
1060 } else if (txs->phyerr) {
1061 update_rate = false;
1062 WLCNTINCR(wlc->pub->_cnt->txphyerr);
1063 WL_ERROR(("wl%d: wlc_ampdu_dotxstatus: tx phy error (0x%x)\n", wlc->pub->unit, txs->phyerr));
1064
1065 #ifdef BCMDBG
1066 if (WL_ERROR_ON()) {
1067 prpkt("txpkt (AMPDU)", wlc->osh, p);
1068 wlc_print_txdesc((d11txh_t *) p->data);
1069 wlc_print_txstatus(txs);
1070 }
1071 #endif /* BCMDBG */
1072 }
1073 }
1074
1075 /* loop through all pkts and retry if not acked */
1076 while (p) {
1077 tx_info = IEEE80211_SKB_CB(p);
1078 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
1079 txh = (d11txh_t *) p->data;
1080 mcl = ltoh16(txh->MacTxControlLow);
1081 plcp = (u8 *) (txh + 1);
1082 h = (struct dot11_header *)(plcp + D11_PHY_HDR_LEN);
1083 seq = ltoh16(h->seq) >> SEQNUM_SHIFT;
1084
1085 if (tot_mpdu == 0) {
1086 mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
1087 mimoantsel = ltoh16(txh->ABI_MimoAntSel);
1088 }
1089
1090 index = TX_SEQ_TO_INDEX(seq);
1091 ack_recd = false;
1092 if (ba_recd) {
1093 bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
1094
1095 WL_AMPDU_TX(("%s: tid %d seq is %d, start_seq is %d, "
1096 "bindex is %d set %d, index %d\n",
1097 __func__, tid, seq, start_seq, bindex,
1098 isset(bitmap, bindex), index));
1099
1100 /* if acked then clear bit and free packet */
1101 if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
1102 && isset(bitmap, bindex)) {
1103 ini->tx_in_transit--;
1104 ini->txretry[index] = 0;
1105
1106 /* ampdu_ack_len: number of acked aggregated frames */
1107 /* ampdu_ack_map: block ack bit map for the aggregation */
1108 /* ampdu_len: number of aggregated frames */
1109 rate_status(wlc, tx_info, txs, mcs);
1110 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1111 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
1112
1113 /* XXX TODO: Make these accurate. */
1114 tx_info->status.ampdu_ack_len =
1115 (txs->
1116 status & TX_STATUS_FRM_RTX_MASK) >>
1117 TX_STATUS_FRM_RTX_SHIFT;
1118 tx_info->status.ampdu_len =
1119 (txs->
1120 status & TX_STATUS_FRM_RTX_MASK) >>
1121 TX_STATUS_FRM_RTX_SHIFT;
1122
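/* strip the PLCP and d11 tx descriptor the driver prepended so that
 * mac80211 gets back the frame it originally submitted */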
1123 skb_pull(p, D11_PHY_HDR_LEN);
1124 skb_pull(p, D11_TXH_LEN);
1125
1126 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
1127 p);
1128 ack_recd = true;
1129 suc_mpdu++;
1130 }
1131 }
1132 /* either retransmit or send bar if ack not recd */
1133 if (!ack_recd) {
1134 struct ieee80211_tx_rate *txrate =
1135 tx_info->status.rates;
1136 if (retry && (txrate[0].count < (int)retry_limit)) {
1137 ini->txretry[index]++;
1138 ini->tx_in_transit--;
1139 /* Use high precedence for retransmit to give some punch */
1140 /* wlc_txq_enq(wlc, scb, p, WLC_PRIO_TO_PREC(tid)); */
1141 wlc_txq_enq(wlc, scb, p,
1142 WLC_PRIO_TO_HI_PREC(tid));
1143 } else {
1144 /* Retry timeout */
1145 ini->tx_in_transit--;
1146 ieee80211_tx_info_clear_status(tx_info);
1147 tx_info->flags |=
1148 IEEE80211_TX_STAT_AMPDU_NO_BACK;
1149 skb_pull(p, D11_PHY_HDR_LEN);
1150 skb_pull(p, D11_TXH_LEN);
1151 WL_ERROR(("%s: BA Timeout, seq %d, in_transit %d\n", SHORTNAME, seq, ini->tx_in_transit));
1152 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
1153 p);
1154 }
1155 }
1156 tot_mpdu++;
1157
1158 /* break out if last packet of ampdu */
1159 if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
1160 TXC_AMPDU_LAST)
1161 break;
1162
1163 p = GETNEXTTXP(wlc, queue);
1164 if (p == NULL) {
1165 ASSERT(p);
1166 break;
1167 }
1168 }
1169 wlc_send_q(wlc, wlc->active_queue);
1170
1171 /* update rate state */
1172 if (WLANTSEL_ENAB(wlc))
1173 antselid = wlc_antsel_antsel2id(wlc->asi, mimoantsel);
1174
1175 wlc_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
1176 }
1177
1178 static void
1179 ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
1180 bool force)
1181 {
1182 scb_ampdu_tid_ini_t *ini;
1183 ini = SCB_AMPDU_INI(scb_ampdu, tid);
1184 if (!ini)
1185 return;
1186
1187 WL_AMPDU_CTL(("wl%d: ampdu_cleanup_tid_ini: tid %d\n",
1188 ampdu->wlc->pub->unit, tid));
1189
1190 if (ini->tx_in_transit && !force)
1191 return;
1192
1193 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, ini->scb);
1194 ASSERT(ini == &scb_ampdu->ini[ini->tid]);
1195
1196 /* free all buffered tx packets */
1197 pktq_pflush(ampdu->wlc->osh, &scb_ampdu->txq, ini->tid, true, NULL, 0);
1198 }
1199
1200 /* initialize the initiator code for tid */
1201 static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
1202 scb_ampdu_t *scb_ampdu,
1203 u8 tid, bool override)
1204 {
1205 scb_ampdu_tid_ini_t *ini;
1206
1207 ASSERT(scb_ampdu);
1208 ASSERT(scb_ampdu->scb);
1209 ASSERT(SCB_AMPDU(scb_ampdu->scb));
1210 ASSERT(tid < AMPDU_MAX_SCB_TID);
1211
1212 /* check for per-tid control of ampdu */
1213 if (!ampdu->ini_enable[tid]) {
1214 WL_ERROR(("%s: Rejecting tid %d\n", __func__, tid));
1215 return NULL;
1216 }
1217
1218 ini = SCB_AMPDU_INI(scb_ampdu, tid);
1219 ini->tid = tid;
1220 ini->scb = scb_ampdu->scb;
1221 ini->magic = INI_MAGIC;
1222 WLCNTINCR(ampdu->cnt->txaddbareq);
1223
1224 return ini;
1225 }
1226
1227 int wlc_ampdu_set(ampdu_info_t *ampdu, bool on)
1228 {
1229 wlc_info_t *wlc = ampdu->wlc;
1230
1231 wlc->pub->_ampdu = false;
1232
1233 if (on) {
1234 if (!N_ENAB(wlc->pub)) {
1235 WL_AMPDU_ERR(("wl%d: driver not nmode enabled\n",
1236 wlc->pub->unit));
1237 return BCME_UNSUPPORTED;
1238 }
1239 if (!wlc_ampdu_cap(ampdu)) {
1240 WL_AMPDU_ERR(("wl%d: device not ampdu capable\n",
1241 wlc->pub->unit));
1242 return BCME_UNSUPPORTED;
1243 }
1244 wlc->pub->_ampdu = on;
1245 }
1246
1247 return 0;
1248 }
1249
1250 bool wlc_ampdu_cap(ampdu_info_t *ampdu)
1251 {
1252 if (WLC_PHY_11N_CAP(ampdu->wlc->band))
1253 return true;
1254 else
1255 return false;
1256 }
1257
1258 static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur)
1259 {
1260 u32 rate, mcs;
1261
1262 for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
1263 /* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
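/* e.g. (illustrative) a 150000 kbps rate with dur = 5 msec gives
 * (150000 * 5) >> 3 = 93750 bytes of max ampdu length */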
1264 /* 20MHz, No SGI */
1265 rate = MCS_RATE(mcs, false, false);
1266 ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
1267 /* 40 MHz, No SGI */
1268 rate = MCS_RATE(mcs, true, false);
1269 ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
1270 /* 20MHz, SGI */
1271 rate = MCS_RATE(mcs, false, true);
1272 ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
1273 /* 40 MHz, SGI */
1274 rate = MCS_RATE(mcs, true, true);
1275 ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
1276 }
1277 }
1278
1279 u8 BCMFASTPATH
1280 wlc_ampdu_null_delim_cnt(ampdu_info_t *ampdu, struct scb *scb,
1281 ratespec_t rspec, int phylen)
1282 {
1283 scb_ampdu_t *scb_ampdu;
1284 int bytes, cnt, tmp;
1285 u8 tx_density;
1286
1287 ASSERT(scb);
1288 ASSERT(SCB_AMPDU(scb));
1289
1290 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
1291 ASSERT(scb_ampdu);
1292
1293 if (scb_ampdu->mpdu_density == 0)
1294 return 0;
1295
1296 /* RSPEC2RATE is in kbps units ==> ~RSPEC2RATE/2^13 is in bytes/usec
1297 density x is in 2^(x-4) usec
1298 ==> # of bytes needed for req density = rate/2^(17-x)
1299 ==> # of null delimiters = ceil((ceil(rate/2^(17-x)) - phylen)/4)
1300 */
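
/* Worked example (illustrative numbers, assuming CEIL() is round-up
 * division): at 300000 kbps with density x = 6 (4 usec spacing),
 * tmp = 1 << 11 = 2048 and bytes = CEIL(300000, 2048) = 147; for
 * phylen = 60 that yields CEIL(147 - 60, 4) = 22 null delimiters.
 */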
1301
1302 tx_density = scb_ampdu->mpdu_density;
1303
1304 ASSERT(tx_density <= AMPDU_MAX_MPDU_DENSITY);
1305 tmp = 1 << (17 - tx_density);
1306 bytes = CEIL(RSPEC2RATE(rspec), tmp);
1307
1308 if (bytes > phylen) {
1309 cnt = CEIL(bytes - phylen, AMPDU_DELIMITER_LEN);
1310 ASSERT(cnt <= 255);
1311 return (u8) cnt;
1312 } else
1313 return 0;
1314 }
1315
1316 void wlc_ampdu_macaddr_upd(wlc_info_t *wlc)
1317 {
1318 char template[T_RAM_ACCESS_SZ * 2];
1319
1320 /* driver needs to write the ta in the template; ta is at offset 16 */
1321 memset(template, 0, sizeof(template));
1322 bcopy((char *)wlc->pub->cur_etheraddr.octet, template, ETHER_ADDR_LEN);
1323 wlc_write_template_ram(wlc, (T_BA_TPL_BASE + 16), (T_RAM_ACCESS_SZ * 2),
1324 template);
1325 }
1326
1327 bool wlc_aggregatable(wlc_info_t *wlc, u8 tid)
1328 {
1329 return wlc->ampdu->ini_enable[tid];
1330 }
1331
1332 void wlc_ampdu_shm_upd(ampdu_info_t *ampdu)
1333 {
1334 wlc_info_t *wlc = ampdu->wlc;
1335
1336 /* Extend ucode internal watchdog timer to match larger received frames */
1337 if ((ampdu->rx_factor & HT_PARAMS_RX_FACTOR_MASK) ==
1338 AMPDU_RX_FACTOR_64K) {
1339 wlc_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
1340 wlc_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
1341 } else {
1342 wlc_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
1343 wlc_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
1344 }
1345 }