/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of receive path.
 */

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 *
 * NOTE: Caller should hold the rxbuf lock.
 */

static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds = bf->bf_desc;
	struct sk_buff *skb = bf->bf_mpdu;

	ds->ds_link = 0;		/* link to null */
	ds->ds_data = bf->bf_buf_addr;
	/* virtual addr of the beginning of the buffer. */
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah, ds,
			     skb_tailroom(skb),	/* buffer size */
			     0);

	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
}
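
/*
 * Editor's illustration (not in the original source): after linking
 * buffers bf0..bf2 in order, the DMA chain looks like
 *
 *   putrxbuf(bf0->bf_daddr)          hardware fetch pointer
 *   bf0.ds_link -> bf1->bf_daddr     patched through *sc_rxlink
 *   bf1.ds_link -> bf2->bf_daddr
 *   bf2.ds_link =  0                 tail stays null-linked
 *
 * sc_rxlink always addresses the ds_link word of the current tail,
 * which is why the 11n code above never self-links the last descriptor.
 */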

static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: skbuff alloc of size %u failed\n",
			__func__, len);
		return NULL;
	}

	return skb;
}
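
/*
 * Worked example (editor's note, illustrative): with sc_cachelsz == 32
 * and skb->data landing 20 bytes into a cache line, off == 20 and
 * skb_reserve() advances data by 32 - 20 == 12 bytes, so the DMA buffer
 * starts on a cache-line boundary. The extra sc_cachelsz - 1 bytes
 * requested above guarantee there is room for this shift.
 */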

static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (bf->bf_status & ATH_BUFSTATUS_STALE) {
		/*
		 * This buffer is still held for hw access.
		 * Mark it as free to be re-queued later.
		 */
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} else {
		/* XXX: we probably never enter here, remove after
		 * verification */
		list_add_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	}
	spin_unlock_bh(&sc->sc_rxbuflock);
}
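
/*
 * Editor's summary of the two status bits: STALE means the hardware may
 * still look at this descriptor (it is the holding descriptor); FREE
 * means the upper layer has returned the buffer. Only when both resolve
 * - STALE cleared by the rx tasklet, FREE set here - is the buffer
 * chained back to the hardware.
 */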

/*
 * The skb indicated to the upper stack won't be returned to us.
 * So we have to allocate a new one and queue it by ourselves.
 */
static int ath_rx_indicate(struct ath_softc *sc,
			   struct sk_buff *skb,
			   struct ath_recv_status *status,
			   u16 keyix)
{
	struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
	struct sk_buff *nskb;
	int type;

	/* indicate frame to the stack, which will free the old skb. */
	type = _ath_rx_indicate(sc, skb, status, keyix);

	/* allocate a new skb and queue it for H/W processing */
	nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
	if (nskb != NULL) {
		bf->bf_mpdu = nskb;
		bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
					skb_end_pointer(nskb) - nskb->head,
					PCI_DMA_FROMDEVICE);
		bf->bf_dmacontext = bf->bf_buf_addr;
		ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;

		/* queue the new wbuf to H/W */
		ath_rx_requeue(sc, nskb);
	}

	return type;
}
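
/*
 * Editor's summary of the ownership cycle: the skb handed to
 * _ath_rx_indicate() now belongs to the stack; the ath_buf is recycled
 * immediately with a freshly mapped replacement skb, so the hardware
 * queue depth stays constant however long mac80211 holds the indicated
 * frame.
 */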

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->sc_rxbuflock);

		/*
		 * Cisco's VPN software requires that drivers be able to
		 * receive encapsulated frames that are larger than the MTU.
		 * Since we can't be sure how large a frame we'll get, set up
		 * to handle the largest one possible.
		 */
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   sc->sc_cachelsz);

		DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
			__func__, sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate rx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* Pre-allocate a wbuf for each rx buffer */
		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
					skb_end_pointer(skb) - skb->head,
					PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
			ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
		}
		sc->sc_rxlink = NULL;
	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

/* Reclaim all rx queue resources */

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	/* cleanup rx descriptors */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	     (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL
	 * frames & beacon frames */
	if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);

	return rfilt;

#undef RX_FILTER_PRESERVE
}
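
/*
 * Example (editor's note, illustrative): a STA with
 * FIF_BCN_PRBRESP_PROMISC set and no promiscuous flags ends up with
 *
 *   rfilt = (preserved PHYERR/PHYRADAR bits)
 *         | UCAST | BCAST | MCAST | BEACON
 *
 * i.e. no PROBEREQ (STA mode) and no PROM bit.
 */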

/* Enable the receive h/w following a reset. */

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * The upper layer may not be done with the frame yet,
			 * so we can't just re-queue it to hardware. Remove it
			 * from the h/w queue. It'll be re-queued when the
			 * upper layer returns the frame and
			 * ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);		/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */

	return 0;
}
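
/*
 * Ordering note (editor's summary): descriptors are re-chained and the
 * head handed to the DMA engine (putrxbuf/rxena) while the PCU is still
 * stopped; only then are filters programmed and the PCU restarted, so
 * no frame should arrive before a valid descriptor chain exists.
 */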

/* Disable the receive h/w in preparation for a reset. */

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);		/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);		/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);			/* 3ms is long enough for 1 frame */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;		/* just in case */

	return stopped;
}
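
/*
 * Editor's summary: teardown mirrors startup in reverse - stop the PCU
 * first so no new frames enter, clear the filter, then stop DMA; the
 * 3ms delay gives an in-flight frame time to complete before the caller
 * resets the hardware.
 */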

/* Flush receive queue */

void ath_flushrecv(struct ath_softc *sc)
{
	/*
	 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
	 * queue at the same time. Use a lock to serialize the access of rx
	 * queue.
	 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
	 * Instead, do not claim the spinlock but check for a flush in
	 * progress (see references to sc_rxflush).
	 */
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;

	ath_rx_tasklet(sc, 1);

	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->sc_rxflushlock);
}
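
/*
 * Editor's note: calling ath_rx_tasklet(sc, 1) with SC_OP_RXFLUSH set
 * makes the tasklet take the rx_next path for every completed frame:
 * buffers are marked FREE and re-chained without being indicated to
 * mac80211.
 */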

/* Process receive queue, as well as LED, etc. */

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition where the BH gets scheduled
		 * after sw writes RxE and before hw re-loads the last
		 * descriptor to get the newly chained one. Software must
		 * keep the last DONE descriptor as a holding descriptor -
		 * software does so by marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in the queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						      &sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}
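
		/*
		 * Timeline of the race (editor's note): this tasklet may run
		 * between the write to RxE and the hardware re-reading the
		 * tail ds_link, so the last DONE descriptor must stay visible
		 * to the DMA engine. Processing therefore starts at the
		 * descriptor *after* the STALE holding descriptor.
		 */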

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the
			 * current descriptor has been corrupted. Force s/w
			 * to discard this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds,
						     tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link),
						     0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {	/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}

		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;

		/*
		 * Release the lock here in case ieee80211_input() returns
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush the receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memset(&rx_status, 0, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */

			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
				goto rx_next;
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push up the frame to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or a real decryption
				 * error. This lets us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push up the frame to let
				 * mac80211 handle the actual error case. This
				 * lets us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes we get invalid
					 * MIC failures on valid control
					 * frames. Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}

			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
				      ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC))
					goto rx_next;
			}
		}

		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;

		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			/* ATH9K_RX_GI indicates a short guard interval */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) {
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
				rx_status.flags |= ATH_RX_SHORT_GI;
			}
		}
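
		/*
		 * Where the constants come from (editor's note): a 40 MHz HT
		 * channel carries 108 data subcarriers vs 52 at 20 MHz, so
		 * the rate scales by 108/52 == 27/13; a short guard interval
		 * shortens the symbol from 4.0us to 3.6us, scaling the rate
		 * by 10/9.
		 */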

		/* sc_noise_floor is only available when the station
		 * attaches to an AP, so we use a default value
		 * if we are not yet attached. */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    sc->sc_rxbufsize,
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}
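
		/*
		 * Editor's note: rssictl[] reports per-chain RSSI on the
		 * primary 20 MHz (control) channel and rssiextn[] on the
		 * extension channel of a 40 MHz pair, which is why the
		 * extension values are only valid when ATH9K_RX_2040 is set.
		 */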

		/* Pass frames up to the stack. */
		type = ath_rx_indicate(sc, skb,
				       &rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * Change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
						  ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif

		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronously or
		 * asynchronously. So we don't want to do it here in
		 * this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (1);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ath_reset(sc, false);
	}

	return 0;
#undef PA2DESC
}