drivers/net/wireless/ath9k/recv.c

/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of receive path.
 */

#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */

static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0;		/* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* XXX For RADAR?
	 * virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah,
			     ds,
			     skb_tailroom(skb),	/* buffer size */
			     0);

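	/*
	 * If this is the first buffer in the chain, point the hardware
	 * at it directly; otherwise link it from the previous descriptor.
	 * Then remember this descriptor's link field for the next buffer.
	 */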
	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

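/* Allocate a receive skb whose data area starts on a cache line boundary */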
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}

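/* Hand a buffer back to the hardware rx queue, or just mark it free if
 * it is still in use as the holding descriptor. */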
static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	ASSERT(bf != NULL);

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it as free to be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}

/*
 * The skb indicated to the upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = _ath_rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
		bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
				skb_end_pointer(nskb) - nskb->head,
				PCI_DMA_FROMDEVICE);
		bf->bf_dmacontext = bf->bf_buf_addr;
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}

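/* Program the PCU with the rx filter, BSSID mask, operating mode,
 * MAC address and multicast filter. */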
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

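/* Allocate rx descriptors and buffers, and set up the DMA mappings
 * for the receive path. */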
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, setup
		 * to handle the largest one possible.
		 */
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */

		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
					skb_end_pointer(skb) - skb->head,
					PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */

	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (sc->sc_ah->ah_opmode == ATH9K_M_STA ||
	    sc->sc_ah->ah_opmode == ATH9K_M_IBSS)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames
	   & beacon frames */
	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
	return rfilt;

#undef RX_FILTER_PRESERVE
}

/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);		/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);	/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);			/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;		/* just in case */
	return stopped;
}

/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize access to the rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush).
	 */
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;

	ath_rx_tasklet(sc, 1);

	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->sc_rxflushlock);
}

/* Process receive queue, as well as LED, etc. */
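/*
 * A non-zero 'flush' argument causes frames to be requeued to the hardware
 * without being passed up the stack; see ath_flushrecv().
 */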

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
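/* PA2DESC maps a descriptor's physical (DMA) address back to its virtual
 * address within the rx descriptor block. */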
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition where the BH gets scheduled
		 * after sw writes RxE and before hw re-loads the last
		 * descriptor to get the newly chained one. Software must
		 * keep the last DONE descriptor as a holding descriptor -
		 * software does so by marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the
			 * current descriptor has been corrupted. Force s/w to
			 * discard this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
					tds, tbf->bf_daddr,
					PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {		/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}
		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;
		/*
		 * Release the lock here in case ieee80211_input() returns
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memset(&rx_status, 0, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
				goto rx_next;
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or a real decryption
				 * error. This lets us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push the frame up to let
				 * mac80211 handle the actual error case. This
				 * lets us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}
			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}
		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;
		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
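			/*
			 * 40 MHz channels carry 108 data subcarriers vs. 52
			 * for 20 MHz, hence the 27/13 scaling; a short guard
			 * interval shortens the OFDM symbol from 4.0 us to
			 * 3.6 us, hence the 10/9 scaling.
			 */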
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
			else
				rx_status.flags |= ATH_RX_SHORT_GI;
		}

		/* sc_noise_floor is only available when the station
		 * attaches to an AP, so we use a default value
		 * if we are not yet attached. */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}

		/* Pass frames up to the stack. */

		type = ath_rx_indicate(sc, skb,
				       &rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
						  ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or
		 * asynchronously. So we don't want to do it here in
		 * this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (TRUE);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ASSERT(flush == 0);
		ath_reset(sc, false);
	}

	return 0;
#undef PA2DESC
}