/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
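/*
 * Illustrative sketch, not part of the driver: SKB_CB_ATHBUF aliases the
 * first pointer-sized slot of skb->cb, so the owning ath_buf can be stashed
 * when an skb is pushed into an RX FIFO and recovered again on completion.
 * The helper name below is hypothetical and only shows the round trip:
 *
 *	static void example_cb_roundtrip(struct sk_buff *skb,
 *					 struct ath_buf *bf)
 *	{
 *		SKB_CB_ATHBUF(skb) = bf;		// stash before queueing
 *		BUG_ON(SKB_CB_ATHBUF(skb) != bf);	// recover after dequeue
 *	}
 */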
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) &&
	       (pkt_count > 50);
}
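/*
 * Worked example with made-up numbers: for alt_ratio = 40, maxdelta = 9,
 * mindelta = 4, main_rssi_avg = 20 and alt_rssi_avg = 26, the first clause
 * fails unless alt_ratio reaches ATH_ANT_DIV_COMB_ALT_ANT_RATIO2, but
 * 26 > 20 + 4 satisfies the mindelta clause, so the alternative antenna is
 * reported better once more than 50 packets have been sampled.
 */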
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		goto rx_init_fail;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
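/*
 * Buffer-size arithmetic, with made-up numbers for illustration: if
 * IEEE80211_MAX_MPDU_LEN + rx_status_len came to 3856 bytes and the cache
 * line size were 128 (clamped to 64 by the min() above), then
 * roundup(3856, 64) = 3904, and the hardware is told it may DMA
 * 3904 - rx_status_len bytes of frame data into each buffer.
 */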
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
				common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->nvifs > 1) ||
	    (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
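/*
 * Example outcome (illustrative, assuming no preserved PHY-error bits): a
 * single managed station that is neither scanning nor promiscuous typically
 * ends up with UCAST | BCAST | MCAST | MYBEACON, plus COMP_BAR when the
 * current channel is HT.
 */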
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
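/*
 * For reference, the TIM element walked above has the 802.11 layout:
 * element ID, length, dtim_count, dtim_period, bitmap_ctrl, partial
 * virtual bitmap. Bit 0 of bitmap_ctrl is the traffic indicator for
 * buffered broadcast/multicast frames, which is exactly what is returned
 * once dtim_count has reached zero.
 */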
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}
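/*
 * Illustration of the look-ahead above: with buffers A -> B at the head of
 * sc->rx.rxbuf, if A's status still reads "in progress" but B's descriptor
 * already has its done bit set, A's status words are assumed corrupted and
 * A is returned for processing (and likely rejection) instead of stalling
 * the queue behind it.
 */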
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    test_bit(rx_stats->rs_keyix, common->tkip_keymap))
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

	return -EINVAL;
}
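/*
 * Usage note (illustrative; the 0x1b value is made up, since hw_value
 * numbers are hardware specific): a legacy frame whose rs_rate matches
 * sband->bitrates[i].hw_value maps straight to rate index i, while a match
 * on hw_value_short additionally sets RX_FLAG_SHORTPRE; bit 7 of rs_rate
 * diverts the frame into the HT path above instead.
 */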
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	fc = hdr->frame_control;

	rcu_read_lock();
	/*
	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
	 * under the current ath9k virtual wiphy implementation as we have
	 * no way of tying a vif to wiphy. Typically vifs are attached to
	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
	 * wiphy you'd have to iterate over every wiphy and each sdata.
	 */
	if (is_multicast_ether_addr(hdr->addr1))
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
	else
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);

	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		   !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push up the frame up to let mac80211 handle the actual error case, be it
 * no decryption key or real decryption error. This let us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
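/*
 * Worked example of the padding removal: for a QoS data frame
 * ath9k_cmn_padpos() returns 26, so padsize = 26 & 3 = 2; the 26 header
 * bytes are shifted forward by two and skb_pull() drops the pad, restoring
 * a contiguous 802.11 frame. For a plain data frame padpos is 24,
 * padsize is 0, and nothing moves.
 */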
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}
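/*
 * As the case labels above suggest, (main_lna_conf << 4) | alt_lna_conf
 * packs the two LNA settings into one byte: e.g. 0x21 reads as main = LNA1
 * with alt = LNA2, while 0x13 reads as main = LNA2 with the alternative on
 * the combined A+B path. The bias values themselves are hardware tuning
 * constants.
 */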
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set, curr_bias;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		       ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			 ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				 antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;
	curr_bias = div_ant_conf.fast_div_bias;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (unlikely(ath9k_check_auto_sleep(sc) ||
			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
					      PS_WAIT_FOR_CAB |
					      PS_WAIT_FOR_PSPOLL_DATA))))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
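/*
 * Usage sketch (illustrative): the RX bottom half would invoke this once
 * per EDMA queue, e.g. ath_rx_tasklet(sc, 0, true) for the high-priority
 * FIFO and ath_rx_tasklet(sc, 0, false) for the low-priority one, while
 * ath_flushrecv() above calls it with flush = 1 so buffers are chained
 * straight back to the hardware without being handed to mac80211.
 */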