ath9k: remove bfs_tidno from struct ath_buf_state
drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
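
/*
 * SYMBOL_TIME* convert a symbol count to microseconds (4 us per symbol,
 * or 3.6 us when a short guard interval is in use), and the
 * NUM_SYMBOLS_PER_USEC* macros are their integer-arithmetic inverses.
 */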

#define OFDM_SIFS_TIME              16

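/*
 * bits_per_symbol[][] gives the data bits carried by one OFDM symbol for
 * a single spatial stream at MCS 0-7 (20 MHz vs 40 MHz columns). Higher
 * MCS indices reuse row (rix % 8) scaled by HT_RC_2_STREAMS(rix); see
 * ath_pkt_duration() and ath_compute_num_delims() below.
 */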
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

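/*
 * Largest frame length (in bytes) that stays within a 4 ms transmit
 * duration for each HT20/HT40 (+short-GI) mode, indexed by MCS 0-31 and
 * clamped to 65532; used by ath_lookup_rate() below.
 */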
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static u16 ath_frame_seqno(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	return le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, ath_frame_seqno(bf->bf_mpdu));
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

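/*
 * tid->tx_buf is a circular bitmap over the block-ack window: a set bit
 * marks an outstanding (un-acked) frame. ath_tx_update_baw() above clears
 * the completed frame's bit and slides seq_start/baw_head forward past any
 * leading completed entries; ath_tx_addto_baw() below marks a frame as
 * outstanding and grows the tail as needed.
 */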
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		   (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	u16 bf_seqno;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		bf_seqno = ath_frame_seqno(bf->bf_mpdu);
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

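/*
 * ath_clone_txbuf() serves the software-retry path: when the last
 * descriptor of an aggregate is still held by the hardware as a holding
 * descriptor (bf_stale), a fresh ath_buf is cloned from it so the frame
 * can be re-queued while the original descriptor stays untouched; see
 * ath_tx_complete_aggr() below.
 */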
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	u16 bf_seqno;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    ts, 0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		bf_seqno = ath_frame_seqno(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

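/*
 * Example (values from the tables above): if the first rate is MCS 4 at
 * HT20 with full GI, ath_max_4ms_framelen[MCS_HT20][4] = 19300, so a
 * BT-free setup caps the aggregate at min(19300, ATH_AMPDU_LIMIT_MAX)
 * bytes; with BT traffic detected the cap drops to 3/8 of that.
 */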
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

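/*
 * Worked example (illustrative, not from the driver): with an mpdu
 * density of 8 us, full GI and MCS 3 on 20 MHz (104 bits/symbol, one
 * stream), nsymbols = 8 >> 2 = 2 and minlen = (2 * 104) / 8 = 26 bytes;
 * any subframe shorter than that gets (minlen - frmlen) /
 * ATH_AGGR_DELIM_SZ extra delimiters.
 */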
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	u16 bf_seqno;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		bf_seqno = ath_frame_seqno(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!bf_isretried(bf))
			ath_tx_addto_baw(sc, tid, bf_seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

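/*
 * Note on the limits above: the aggregate closes when it would overflow
 * the block-ack window, exceed the rate-derived byte budget from
 * ath_lookup_rate(), or reach min(h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)
 * subframes, where h_baw is half the negotiated block-ack window size.
 */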
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

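/*
 * Together, ath_tx_aggr_start/stop/resume track the ADDBA handshake:
 * start pauses the TID and reports the starting sequence number for the
 * ADDBA request, resume applies the negotiated BAW size and unpauses,
 * and stop either tears down immediately or defers to AGGR_CLEANUP while
 * subframes are still in flight.
 */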
/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

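/*
 * tqi_readyTime ends up as a percentage of the beacon interval: assuming
 * a 100 TU beacon interval and cabqReadytime = 10, the CAB queue gets a
 * 10 TU window after each beacon to drain buffered multicast traffic.
 */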
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

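/*
 * The scheduler above is a two-level round robin: access categories
 * rotate on txq->axq_acq, and within the chosen AC the TIDs rotate on
 * ac->tid_q; a TID with remaining frames is re-queued at the tail so
 * other TIDs get a turn before it runs again.
 */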
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

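/*
 * Two DMA models are handled above: EDMA parts (the AR9003 family) push
 * frames into one of ATH_TXFIFO_DEPTH hardware FIFO slots, while legacy
 * parts chain descriptors through axq_link and kick the queue with
 * ath9k_hw_txstart() only when the chain was previously empty.
 */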
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	u16 bf_seqno;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
	bf_seqno = ath_frame_seqno(bf->bf_mpdu);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	if (!bf_isretried(bf))
		ath_tx_addto_baw(sc, tid, bf_seqno);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 tidno;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

	/*
	 * Override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

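/*
 * Worked example (illustrative): a 1500-byte frame at MCS 7, 20 MHz,
 * full GI: nbits = 1500*8 + 22 = 12022, nsymbits = 260, so
 * nsymbols = ceil(12022/260) = 47 and duration = 47 * 4 us plus
 * 8+8+4+8+4+4 us of preamble/training fields for one stream.
 */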
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

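/*
 * The four series entries above mirror mac80211's multi-rate retry
 * chain: the hardware walks series[0..3] in order, spending
 * series[i].Tries attempts at each rate before falling back to the next.
 */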
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		bf->bf_frmlen -= padsize;
	}

	if (ieee80211_is_data_qos(fc) && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (sc->sc_flags & SC_OP_TXAGGR)
			assign_aggr_tid_seqno(skb, bf);
	}

	bf->bf_flags = setup_tx_flags(skb);

	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	bf->bf_tx_aborted = false;

	return bf;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;
	u8 tidno;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
		}
	} else {
		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, NULL, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q;

	bf = ath_tx_setup_buffer(hw, skb);
	if (unlikely(!bf))
		return -ENOMEM;

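	/*
	 * Once this hardware queue holds more than ATH_MAX_QDEPTH pending
	 * frames, stop the corresponding mac80211 queue; it is woken again
	 * from the completion path when the backlog drains.
	 */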
	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

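	/* Buffered multicast traffic goes out on the content-after-beacon
	 * (CAB) queue, right behind the beacon. */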
	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags, int ftype,
			    struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}

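	/*
	 * Driver-internal frames are completed through ath9k_tx_status();
	 * everything else is reported back to mac80211, dropping the
	 * per-queue pending count on the way out.
	 */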
	if (unlikely(ftype))
		ath9k_tx_status(hw, skb, ftype);
	else {
		q = skb_get_queue_mapping(skb);
		if (txq == sc->tx.txq_map[q]) {
			spin_lock_bh(&txq->axq_lock);
			if (WARN_ON(--txq->pending_frames < 0))
				txq->pending_frames = 0;
			spin_unlock_bh(&txq->axq_lock);
		}

		ieee80211_tx_status(hw, skb);
	}
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

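	/*
	 * PAPRD training frames belong to the AR9003 PA predistortion
	 * calibration: hand them back to the waiting calibration code,
	 * or drop them if the calibration has already timed out.
	 */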
	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_lastbf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

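	/*
	 * Walk the subframes of the (possibly aggregated) MPDU and count
	 * every sequence number that is missing from the block-ack bitmap.
	 */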
	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, ath_frame_seqno(bf->bf_mpdu));
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

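/*
 * Translate the hardware TX status into the mac80211 rate-control fields
 * of the frame's tx_info.
 */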
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	struct ath_softc *sc = bf->aphy->sc;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > bf->bf_nframes);

		tx_info->status.ampdu_len = bf->bf_nframes;
		tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
{
	struct ath_txq *txq;

	txq = sc->tx.txq_map[qnum];
	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}

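/*
 * Reap completed frames from a hardware queue: pop finished descriptors,
 * update rate-control state, and hand each frame back to its owner.
 */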
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;
	int qnum;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

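/*
 * TX watchdog: if a queue still has frames in flight and nothing has
 * completed since the previous poll, assume the hardware has hung and
 * reset the chip.
 */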
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, true);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

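/*
 * Bottom half for pre-EDMA chips: ask the hardware which queues raised
 * TX interrupts and process only those.
 */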
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

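/*
 * On EDMA (AR9003 family) hardware, TX status arrives on a dedicated
 * status ring rather than in the frame descriptors themselves, so
 * completions are drained from that ring here.
 */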
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;
	int qnum;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

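		/*
		 * A FIFO slot just freed up; push any frames that were
		 * waiting for space on this queue, otherwise give the
		 * aggregation scheduler a chance to run.
		 */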
		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

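/*
 * Allocate the coherent DMA ring that EDMA hardware writes TX status
 * entries into, one caps.txs_len-sized entry per ring slot.
 */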
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

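/*
 * Initialize per-station aggregation state: each WME TID starts with an
 * empty block-ack window and is mapped to its access-class queue.
 */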
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

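/*
 * Tear down per-station state: unschedule any pending TIDs/ACs and drop
 * frames still buffered for aggregation, under the owning queue's lock.
 */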
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}