/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of the receive path (drivers/net/wireless/ath9k/recv.c).
 */

#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */

static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0;	/* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* XXX For RADAR?
	 * virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah,
			     ds,
			     skb_tailroom(skb),	/* buffer size */
			     0);

	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
				       u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}

static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	ASSERT(bf != NULL);

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it as free to be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}

/*
 * The skb indicated to the upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = _ath_rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
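		/* Map the new buffer for DMA from the device; the mapped
		 * length covers the entire allocated data area. */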
		bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
				skb_end_pointer(nskb) - nskb->head,
				PCI_DMA_FROMDEVICE);
		bf->bf_dmacontext = bf->bf_buf_addr;
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;

	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, set up
		 * to handle the largest possible.
		 */
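		/*
		 * Round the maximum MPDU length up to a multiple of the
		 * cache line size (capped at 64 bytes), so each rx buffer
		 * spans a whole number of cache lines.
		 */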
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */

		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
					skb_end_pointer(skb) - skb->head,
					PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */

	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent the hardware from sending ACKs */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	     (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, we want to enable reception of PS-Poll
	 * and beacon frames */
	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
	return rfilt;

#undef RX_FILTER_PRESERVE
}

/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);		/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);	/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);			/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;		/* just in case */
	return stopped;
}

/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize access to the rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush).
	 */
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;

	ath_rx_tasklet(sc, 1);

	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->sc_rxflushlock);
}

/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
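	/*
	 * PA2DESC() maps a descriptor's DMA (physical) address back to its
	 * kernel virtual address, using its offset from the start of the
	 * contiguous descriptor block set up by ath_descdma_setup().
	 */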
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition where the BH gets scheduled
		 * after sw writes RxE and before hw re-loads the last
		 * descriptor to get the newly chained one. Software must
		 * keep the last DONE descriptor as a holding descriptor -
		 * software does so by marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
				tds, tbf->bf_daddr,
				PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {	/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}
		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;
		/*
		 * Release the lock here in case ieee80211_input() returns
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memset(&rx_status, 0, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
				goto rx_next;
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or a real decryption
				 * error. This lets us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * MIC error. We only mark the frame status
				 * here and always push the frame up to let
				 * mac80211 handle the actual error case. This
				 * lets us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}
			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}
		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;
		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is set up again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
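		/* rs_tstamp is a truncated hardware timestamp; extend it
		 * to a full 64-bit TSF for the rx status. */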
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

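		/*
		 * HT (MCS) rates are flagged by bit 0x80 of the rate code.
		 * Scale the reported rate by 27/13 (~2.08) for 40 MHz
		 * operation and by 10/9 for the guard interval adjustment.
		 */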
		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
			else
				rx_status.flags |= ATH_RX_SHORT_GI;
		}

		/* sc_noise_floor is only available when the station
		 * attaches to an AP, so we use a default value
		 * if we are not yet attached. */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}

		/* Pass frames up to the stack. */

		type = ath_rx_indicate(sc, skb,
			&rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
					ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or
		 * asynchronously. So we don't want to do it here in
		 * this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (TRUE);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ASSERT(flush == 0);
		ath_reset(sc, false);
	}

	return 0;
#undef PA2DESC
}