]>
Commit | Line | Data |
---|---|---|
f078f209 | 1 | /* |
5b68138e | 2 | * Copyright (c) 2008-2011 Atheros Communications Inc. |
f078f209 LR |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
394cf0a1 | 17 | #include "ath9k.h" |
b622a720 | 18 | #include "ar9003_mac.h" |
f078f209 | 19 | |
b5c80475 FF |
20 | #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) |
21 | ||
102885a5 VT |
22 | static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta, |
23 | int mindelta, int main_rssi_avg, | |
24 | int alt_rssi_avg, int pkt_count) | |
25 | { | |
26 | return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && | |
27 | (alt_rssi_avg > main_rssi_avg + maxdelta)) || | |
28 | (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); | |
29 | } | |
30 | ||
b85c5734 MSS |
31 | static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio, |
32 | int curr_main_set, int curr_alt_set, | |
33 | int alt_rssi_avg, int main_rssi_avg) | |
34 | { | |
35 | bool result = false; | |
36 | switch (div_group) { | |
37 | case 0: | |
38 | if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) | |
39 | result = true; | |
40 | break; | |
41 | case 1: | |
66ce235a | 42 | case 2: |
b85c5734 MSS |
43 | if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) && |
44 | (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) && | |
45 | (alt_rssi_avg >= (main_rssi_avg - 5))) || | |
46 | ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) && | |
47 | (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) && | |
48 | (alt_rssi_avg >= (main_rssi_avg - 2)))) && | |
49 | (alt_rssi_avg >= 4)) | |
50 | result = true; | |
51 | else | |
52 | result = false; | |
53 | break; | |
54 | } | |
55 | ||
56 | return result; | |
57 | } | |
58 | ||
/*
 * True when power-save is enabled and the hardware supports automatic
 * network sleep (ATH9K_HW_CAP_AUTOSLEEP), i.e. the chip handles sleep
 * transitions without software intervention.
 */
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
64 | ||
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	/* First buffer: hand the chain head to hardware; otherwise
	 * chain this descriptor after the previous one. */
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	/* Remember where the next descriptor's address must be written. */
	sc->rx.rxlink = &ds->ds_link;
}
107 | ||
/*
 * Program the default receive antenna and reset the rx antenna
 * diversity bookkeeping (rxotherant counts frames seen on the
 * non-default antenna).
 */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
115 | ||
/*
 * Program the PCU for the current operating mode: rx filter, BSSID
 * mask, opmode register and multicast filter.
 */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter (accept-all) */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
137 | ||
/*
 * Hand one free rx buffer from sc->rx.rxbuf to the EDMA rx FIFO of the
 * given queue. Returns false when the hardware FIFO is already full,
 * true when a buffer was queued.
 *
 * NOTE(review): callers must ensure sc->rx.rxbuf is non-empty before
 * calling (list_first_entry() below assumes it) — the current callers
 * all check this.
 */
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	/* Zero the status area so stale bytes can never look like a
	 * completed descriptor, then flush that area to the device. */
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	/* Stash the owning ath_buf in skb->cb for the completion path. */
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
166 | ||
167 | static void ath_rx_addbuffer_edma(struct ath_softc *sc, | |
168 | enum ath9k_rx_qtype qtype, int size) | |
169 | { | |
b5c80475 FF |
170 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
171 | u32 nbuf = 0; | |
172 | ||
b5c80475 | 173 | if (list_empty(&sc->rx.rxbuf)) { |
226afe68 | 174 | ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n"); |
b5c80475 | 175 | return; |
797fe5cb | 176 | } |
f078f209 | 177 | |
b5c80475 FF |
178 | while (!list_empty(&sc->rx.rxbuf)) { |
179 | nbuf++; | |
180 | ||
181 | if (!ath_rx_edma_buf_link(sc, qtype)) | |
182 | break; | |
183 | ||
184 | if (nbuf >= size) | |
185 | break; | |
186 | } | |
187 | } | |
188 | ||
189 | static void ath_rx_remove_buffer(struct ath_softc *sc, | |
190 | enum ath9k_rx_qtype qtype) | |
191 | { | |
192 | struct ath_buf *bf; | |
193 | struct ath_rx_edma *rx_edma; | |
194 | struct sk_buff *skb; | |
195 | ||
196 | rx_edma = &sc->rx.rx_edma[qtype]; | |
197 | ||
198 | while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { | |
199 | bf = SKB_CB_ATHBUF(skb); | |
200 | BUG_ON(!bf); | |
201 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
202 | } | |
203 | } | |
204 | ||
205 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | |
206 | { | |
207 | struct ath_buf *bf; | |
208 | ||
209 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | |
210 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | |
211 | ||
797fe5cb | 212 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
b5c80475 FF |
213 | if (bf->bf_mpdu) |
214 | dev_kfree_skb_any(bf->bf_mpdu); | |
215 | } | |
216 | ||
217 | INIT_LIST_HEAD(&sc->rx.rxbuf); | |
218 | ||
219 | kfree(sc->rx.rx_bufptr); | |
220 | sc->rx.rx_bufptr = NULL; | |
221 | } | |
222 | ||
/*
 * Initialize one EDMA rx queue: empty both skb queues and record the
 * hardware FIFO depth for it.
 */
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}
229 | ||
/*
 * Allocate, map and enqueue all rx buffers for EDMA operation and size
 * both hardware rx queues. Returns 0 on success or a negative errno;
 * on failure any partially initialized state is torn down via
 * ath_rx_edma_cleanup().
 */
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	/* The status area at the head of each buffer is not payload the
	 * hardware may DMA frame data into. */
	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	/* One ath_buf per rx buffer, owned by sc->rx.rx_bufptr. */
	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			/* Clear the fields so cleanup won't touch them. */
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
288 | ||
/*
 * Start EDMA reception: enable the rx engine, fill both hardware FIFOs
 * with buffers, program the PCU for the current opmode and kick the
 * PCU receive logic. Runs under rxbuflock.
 */
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}
307 | ||
/*
 * Reclaim all buffers queued to both EDMA rx FIFOs back to the free
 * list; the rx engine itself is stopped by the caller (ath_stoprecv).
 */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
313 | ||
/*
 * Top-level rx initialization. For EDMA chips this delegates to
 * ath_rx_edma_init(); for legacy chips it allocates the rx descriptor
 * ring and maps one skb per descriptor. Returns 0 or a negative errno;
 * on legacy-path failure all rx state is released via ath_rx_cleanup().
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	/* Buffer size: max A-MSDU/2 plus room for the rx status area. */
	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				/* Clear the fields so ath_rx_cleanup()
				 * won't unmap/free them again. */
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
377 | ||
/*
 * Release all rx state. EDMA chips delegate to ath_rx_edma_cleanup();
 * legacy chips unmap and free each rx skb and then tear down the
 * descriptor ring (if it was ever allocated).
 */
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				/* Clear so a second cleanup is a no-op. */
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}
405 | ||
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	/* Keep the PHY error/radar bits the hal may have set, and always
	 * take unicast/broadcast/multicast. */
	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	/* Single-vif station not scanning: only our own AP's beacons. */
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	/* HT association: accept compressed BlockAck Requests. */
	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
474 | ||
/*
 * Start frame reception. EDMA chips take the FIFO-based path; legacy
 * chips (re)build the rx descriptor chain from sc->rx.rxbuf, point the
 * hardware at its head and enable rx. Always returns 0.
 */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	/* Rebuild the descriptor chain from scratch. */
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
510 | ||
/*
 * Stop frame reception: abort the PCU, clear the rx filter and halt rx
 * DMA, then reset driver-side rx state. Returns true only when DMA
 * stopped cleanly and no hardware reset is pending (callers treat a
 * false return as "must reset the chip").
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	/* Only warn when the device is still present; a yanked USB/PCI
	 * device is expected to fail here. */
	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}
536 | ||
/*
 * Drain all pending rx frames while SC_OP_RXFLUSH is set so the tasklet
 * discards them instead of delivering to mac80211. The extra call with
 * 'true' presumably selects the EDMA high-priority queue — TODO confirm
 * against ath_rx_tasklet()'s third parameter.
 */
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
545 | ||
cc65965c JM |
546 | static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) |
547 | { | |
548 | /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ | |
549 | struct ieee80211_mgmt *mgmt; | |
550 | u8 *pos, *end, id, elen; | |
551 | struct ieee80211_tim_ie *tim; | |
552 | ||
553 | mgmt = (struct ieee80211_mgmt *)skb->data; | |
554 | pos = mgmt->u.beacon.variable; | |
555 | end = skb->data + skb->len; | |
556 | ||
557 | while (pos + 2 < end) { | |
558 | id = *pos++; | |
559 | elen = *pos++; | |
560 | if (pos + elen > end) | |
561 | break; | |
562 | ||
563 | if (id == WLAN_EID_TIM) { | |
564 | if (elen < sizeof(*tim)) | |
565 | break; | |
566 | tim = (struct ieee80211_tim_ie *) pos; | |
567 | if (tim->dtim_count != 0) | |
568 | break; | |
569 | return tim->bitmap_ctrl & 0x01; | |
570 | } | |
571 | ||
572 | pos += elen; | |
573 | } | |
574 | ||
575 | return false; | |
576 | } | |
577 | ||
/*
 * Handle a beacon received while in power-save: clear the beacon-wait
 * flag, resync beacon timers when a sync was requested, and decide from
 * the beacon's TIM element whether to stay awake for buffered CAB
 * traffic.
 */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	/* 24-byte header + timestamp(8) + beacon interval(2) + capab(2) */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
		sc->ps_flags &= ~PS_TSFOOR_SYNC;
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}
631 | ||
/*
 * Power-save bookkeeping for one received frame: dispatch beacons to
 * ath_rx_ps_beacon(), detect the end of the CAB burst (last bc/mc data
 * frame without "more data"), and detect completion of PS-Poll delivery
 * (unicast frame without fragments), clearing the matching ps_flags.
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
667 | ||
/*
 * Inspect the head of an EDMA rx FIFO. Returns false when the FIFO is
 * empty or the hardware still owns the head buffer; returns true after
 * moving a completed buffer to rx_edma->rx_buffers, or after discarding
 * a corrupt descriptor (and the one following it, whose status the
 * hardware may also have clobbered).
 */
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	/* Take CPU ownership of the buffer before reading the status. */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
f078f209 | 717 | |
b5c80475 FF |
718 | static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, |
719 | struct ath_rx_status *rs, | |
720 | enum ath9k_rx_qtype qtype) | |
721 | { | |
722 | struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; | |
723 | struct sk_buff *skb; | |
be0418ad | 724 | struct ath_buf *bf; |
b5c80475 FF |
725 | |
726 | while (ath_edma_get_buffers(sc, qtype)); | |
727 | skb = __skb_dequeue(&rx_edma->rx_buffers); | |
728 | if (!skb) | |
729 | return NULL; | |
730 | ||
731 | bf = SKB_CB_ATHBUF(skb); | |
732 | ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data); | |
733 | return bf; | |
734 | } | |
735 | ||
/*
 * Return the head rx buffer of the legacy DMA chain once the hardware
 * has completed it, or NULL while the hardware still owns it. The
 * buffer stays on sc->rx.rxbuf; the caller is responsible for
 * dequeueing/requeueing it.
 */
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	/* No skb attached (mapping failed at init): nothing to sync. */
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
809 | ||
/* Assumes you've already done the endian to CPU conversion */
/*
 * Decide whether a completed rx frame should be delivered to mac80211.
 * Returns false for frames to drop (zero/oversized length, PHY errors,
 * non-crypto errors). Sets *decrypt_error for hardware decryption
 * failures and tags CRC/Michael-MIC failures in rxs->flag so mac80211
 * can handle them.
 */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	/* Multicast, or unicast decrypted with a valid TKIP key — the only
	 * cases in which the hardware MIC-error bit is meaningful. */
#define is_mc_or_valid_tkip_keyix ((is_mc ||			\
		(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
		test_bit(rx_stats->rs_keyix, common->tkip_keymap))))

	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			bool is_mc;
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			is_mc = !!is_multicast_ether_addr(hdr->addr1);

			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    is_mc_or_valid_tkip_keyix)
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
891 | ||
892 | static int ath9k_process_rate(struct ath_common *common, | |
893 | struct ieee80211_hw *hw, | |
894 | struct ath_rx_status *rx_stats, | |
9f167f64 | 895 | struct ieee80211_rx_status *rxs) |
d435700f S |
896 | { |
897 | struct ieee80211_supported_band *sband; | |
898 | enum ieee80211_band band; | |
899 | unsigned int i = 0; | |
900 | ||
901 | band = hw->conf.channel->band; | |
902 | sband = hw->wiphy->bands[band]; | |
903 | ||
904 | if (rx_stats->rs_rate & 0x80) { | |
905 | /* HT rate */ | |
906 | rxs->flag |= RX_FLAG_HT; | |
907 | if (rx_stats->rs_flags & ATH9K_RX_2040) | |
908 | rxs->flag |= RX_FLAG_40MHZ; | |
909 | if (rx_stats->rs_flags & ATH9K_RX_GI) | |
910 | rxs->flag |= RX_FLAG_SHORT_GI; | |
911 | rxs->rate_idx = rx_stats->rs_rate & 0x7f; | |
912 | return 0; | |
913 | } | |
914 | ||
915 | for (i = 0; i < sband->n_bitrates; i++) { | |
916 | if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { | |
917 | rxs->rate_idx = i; | |
918 | return 0; | |
919 | } | |
920 | if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { | |
921 | rxs->flag |= RX_FLAG_SHORTPRE; | |
922 | rxs->rate_idx = i; | |
923 | return 0; | |
924 | } | |
925 | } | |
926 | ||
927 | /* | |
928 | * No valid hardware bitrate found -- we should not get here | |
929 | * because hardware has already validated this frame as OK. | |
930 | */ | |
226afe68 JP |
931 | ath_dbg(common, ATH_DBG_XMIT, |
932 | "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", | |
933 | rx_stats->rs_rate); | |
d435700f S |
934 | |
935 | return -EINVAL; | |
936 | } | |
937 | ||
/*
 * Feed beacon RSSI readings into the driver's running average and
 * smooth the per-frame RSSI value for frames from our BSS.
 *
 * Only runs in STA/IBSS mode and only for beacons from the current
 * BSSID; the smoothed result is stored back into rx_stats->rs_rssi and
 * copied to ah->stats.avgbrssi for use by ANI.
 */
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	/* Beacon RSSI tracking only applies to STA and ad-hoc operation. */
	if ((ah->opmode != NL80211_IFTYPE_STATION) &&
	    (ah->opmode != NL80211_IFTYPE_ADHOC))
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	/* Fold a valid, non-mid-aggregate sample into the low-pass filter. */
	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	/* Replace the raw sample with the rounded filtered average,
	 * unless the filter has never been seeded. */
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}
976 | ||
977 | /* | |
978 | * For Decrypt or Demic errors, we only mark packet status here and always push | |
979 | * up the frame up to let mac80211 handle the actual error case, be it no | |
980 | * decryption key or real decryption error. This let us keep statistics there. | |
981 | */ | |
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 *
 * Returns 0 when the frame may be delivered to mac80211, -EINVAL when
 * it must be dropped. On success @rx_status is fully populated
 * (band, freq, signal, antenna, rate, mactime flag).
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	/* Convert the smoothed RSSI to an absolute signal level (dBm). */
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}
1015 | ||
1016 | static void ath9k_rx_skb_postprocess(struct ath_common *common, | |
1017 | struct sk_buff *skb, | |
1018 | struct ath_rx_status *rx_stats, | |
1019 | struct ieee80211_rx_status *rxs, | |
1020 | bool decrypt_error) | |
1021 | { | |
1022 | struct ath_hw *ah = common->ah; | |
1023 | struct ieee80211_hdr *hdr; | |
1024 | int hdrlen, padpos, padsize; | |
1025 | u8 keyix; | |
1026 | __le16 fc; | |
1027 | ||
1028 | /* see if any padding is done by the hw and remove it */ | |
1029 | hdr = (struct ieee80211_hdr *) skb->data; | |
1030 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | |
1031 | fc = hdr->frame_control; | |
1032 | padpos = ath9k_cmn_padpos(hdr->frame_control); | |
1033 | ||
1034 | /* The MAC header is padded to have 32-bit boundary if the | |
1035 | * packet payload is non-zero. The general calculation for | |
1036 | * padsize would take into account odd header lengths: | |
1037 | * padsize = (4 - padpos % 4) % 4; However, since only | |
1038 | * even-length headers are used, padding can only be 0 or 2 | |
1039 | * bytes and we can optimize this a bit. In addition, we must | |
1040 | * not try to remove padding from short control frames that do | |
1041 | * not have payload. */ | |
1042 | padsize = padpos & 3; | |
1043 | if (padsize && skb->len>=padpos+padsize+FCS_LEN) { | |
1044 | memmove(skb->data + padsize, skb->data, padpos); | |
1045 | skb_pull(skb, padsize); | |
1046 | } | |
1047 | ||
1048 | keyix = rx_stats->rs_keyix; | |
1049 | ||
1050 | if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && | |
1051 | ieee80211_has_protected(fc)) { | |
1052 | rxs->flag |= RX_FLAG_DECRYPTED; | |
1053 | } else if (ieee80211_has_protected(fc) | |
1054 | && !decrypt_error && skb->len >= hdrlen + 4) { | |
1055 | keyix = skb->data[hdrlen + 3] >> 6; | |
1056 | ||
1057 | if (test_bit(keyix, common->keymap)) | |
1058 | rxs->flag |= RX_FLAG_DECRYPTED; | |
1059 | } | |
1060 | if (ah->sw_mgmt_crypto && | |
1061 | (rxs->flag & RX_FLAG_DECRYPTED) && | |
1062 | ieee80211_is_mgmt(fc)) | |
1063 | /* Use software decrypt for management frames. */ | |
1064 | rxs->flag &= ~RX_FLAG_DECRYPTED; | |
1065 | } | |
b5c80475 | 1066 | |
102885a5 VT |
1067 | static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb, |
1068 | struct ath_hw_antcomb_conf ant_conf, | |
1069 | int main_rssi_avg) | |
1070 | { | |
1071 | antcomb->quick_scan_cnt = 0; | |
1072 | ||
1073 | if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2) | |
1074 | antcomb->rssi_lna2 = main_rssi_avg; | |
1075 | else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1) | |
1076 | antcomb->rssi_lna1 = main_rssi_avg; | |
1077 | ||
1078 | switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) { | |
223c5a87 | 1079 | case 0x10: /* LNA2 A-B */ |
102885a5 VT |
1080 | antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; |
1081 | antcomb->first_quick_scan_conf = | |
1082 | ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; | |
1083 | antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1; | |
1084 | break; | |
223c5a87 | 1085 | case 0x20: /* LNA1 A-B */ |
102885a5 VT |
1086 | antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; |
1087 | antcomb->first_quick_scan_conf = | |
1088 | ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; | |
1089 | antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2; | |
1090 | break; | |
223c5a87 | 1091 | case 0x21: /* LNA1 LNA2 */ |
102885a5 VT |
1092 | antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2; |
1093 | antcomb->first_quick_scan_conf = | |
1094 | ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; | |
1095 | antcomb->second_quick_scan_conf = | |
1096 | ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; | |
1097 | break; | |
223c5a87 | 1098 | case 0x12: /* LNA2 LNA1 */ |
102885a5 VT |
1099 | antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1; |
1100 | antcomb->first_quick_scan_conf = | |
1101 | ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; | |
1102 | antcomb->second_quick_scan_conf = | |
1103 | ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; | |
1104 | break; | |
223c5a87 | 1105 | case 0x13: /* LNA2 A+B */ |
102885a5 VT |
1106 | antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; |
1107 | antcomb->first_quick_scan_conf = | |
1108 | ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; | |
1109 | antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1; | |
1110 | break; | |
223c5a87 | 1111 | case 0x23: /* LNA1 A+B */ |
102885a5 VT |
1112 | antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; |
1113 | antcomb->first_quick_scan_conf = | |
1114 | ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; | |
1115 | antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2; | |
1116 | break; | |
1117 | default: | |
1118 | break; | |
1119 | } | |
1120 | } | |
1121 | ||
/*
 * Advance the antenna-diversity quick scan: per step (quick_scan_cnt
 * 0, 1, 2) program the next main/alt LNA configuration to probe, and
 * on the final step pick the alt configuration with the best observed
 * RSSI ratio. Mutates both @antcomb bookkeeping state and the
 * hardware config in @div_ant_conf; the caller writes it to hardware.
 */
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
		struct ath_hw_antcomb_conf *div_ant_conf,
		int main_rssi_avg, int alt_rssi_avg,
		int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		/* Judge the first probed conf: first_ratio = "alt beat
		 * main by enough", with thresholds depending on which
		 * LNA combination is currently main. */
		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		/* Final step: stop scanning and commit a decision. */
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		/* Record the RSSI for whichever LNA the second probe used. */
		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		/* Pick the stronger single LNA as the new main. */
		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		/* Judge the second probed conf, same rules as step 1. */
		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximun ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			/* Both probes won; keep the one with higher RSSI. */
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt*/
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2*/
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
1320 | ||
/*
 * Program the fast-diversity bias (and, for div_group 2, the main/alt
 * gain table selectors) according to the chosen main/alt LNA
 * combination. The bias values are chip-specific magic derived from
 * the (main << 4 | alt) configuration nibbles.
 *
 * NOTE(review): only div_group 0 and 2 are handled here; other group
 * values leave the bias untouched — confirm that is intended for this
 * hardware revision.
 */
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
		struct ath_ant_comb *antcomb, int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
				ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
				ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			/* Outside a scan with a strong alt ratio, use the
			 * weaker bias; otherwise favor the alt path. */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}
1452 | ||
/* Antenna diversity and combining
 *
 * Called per received frame. Accumulates main/alt RSSI statistics,
 * and once enough packets (or a short-scan timeout) have been seen,
 * decides whether to swap main/alt LNAs, start a quick scan of other
 * LNA combinations, or advance an in-progress scan. Finally programs
 * the chosen configuration and fast-diversity bias into hardware and
 * resets the accumulators.
 */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	/* Which antenna config actually received this frame, and which
	 * is currently configured as main (packed in rs_rssi_ctl2). */
	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi is positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check: abort a "good alt" scan early if it has run
	 * too long, or if the alt path is clearly losing. */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	/* Keep accumulating until enough packets are in (and we are not
	 * mid-aggregate), unless a short scan forces a decision now. */
	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				 antcomb->total_pkt_count);
	}


	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	/* Periodically kick off a fresh scan; mark whether the alt
	 * antenna has been receiving a good share of traffic. */
	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					alt_ratio, curr_main_set, curr_alt_set,
					alt_rssi_avg, main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		/* Alt not clearly better than main: nothing to change. */
		if ((alt_rssi_avg < (main_rssi_avg +
				     div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		/* A scan is already in progress: record the RSSI for the
		 * configuration just probed and select the next one. */
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			/* All four configs probed: pick the best pair. */
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B.
					 * NOTE(review): this compares against
					 * rssi_lna1 even though LNA1 is main
					 * here (the LNA2 branch above also
					 * compares rssi_lna1) — verify whether
					 * rssi_lna2 was intended. */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	/* "Good alt" quick scan path: advance it by one step. */
	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	/* Program the chosen configuration and reset the accumulators. */
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
1686 | ||
b5c80475 FF |
1687 | int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) |
1688 | { | |
1689 | struct ath_buf *bf; | |
0d95521e | 1690 | struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; |
5ca42627 | 1691 | struct ieee80211_rx_status *rxs; |
cbe61d8a | 1692 | struct ath_hw *ah = sc->sc_ah; |
27c51f1a | 1693 | struct ath_common *common = ath9k_hw_common(ah); |
b4afffc0 | 1694 | /* |
cae6b74d | 1695 | * The hw can technically differ from common->hw when using ath9k |
b4afffc0 LR |
1696 | * virtual wiphy so to account for that we iterate over the active |
1697 | * wiphys and find the appropriate wiphy and therefore hw. | |
1698 | */ | |
7545daf4 | 1699 | struct ieee80211_hw *hw = sc->hw; |
be0418ad | 1700 | struct ieee80211_hdr *hdr; |
c9b14170 | 1701 | int retval; |
be0418ad | 1702 | bool decrypt_error = false; |
29bffa96 | 1703 | struct ath_rx_status rs; |
b5c80475 FF |
1704 | enum ath9k_rx_qtype qtype; |
1705 | bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); | |
1706 | int dma_type; | |
5c6dd921 | 1707 | u8 rx_status_len = ah->caps.rx_status_len; |
a6d2055b FF |
1708 | u64 tsf = 0; |
1709 | u32 tsf_lower = 0; | |
8ab2cd09 | 1710 | unsigned long flags; |
be0418ad | 1711 | |
b5c80475 | 1712 | if (edma) |
b5c80475 | 1713 | dma_type = DMA_BIDIRECTIONAL; |
56824223 ML |
1714 | else |
1715 | dma_type = DMA_FROM_DEVICE; | |
b5c80475 FF |
1716 | |
1717 | qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; | |
b77f483f | 1718 | spin_lock_bh(&sc->rx.rxbuflock); |
f078f209 | 1719 | |
a6d2055b FF |
1720 | tsf = ath9k_hw_gettsf64(ah); |
1721 | tsf_lower = tsf & 0xffffffff; | |
1722 | ||
f078f209 LR |
1723 | do { |
1724 | /* If handling rx interrupt and flush is in progress => exit */ | |
98deeea0 | 1725 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) |
f078f209 LR |
1726 | break; |
1727 | ||
29bffa96 | 1728 | memset(&rs, 0, sizeof(rs)); |
b5c80475 FF |
1729 | if (edma) |
1730 | bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); | |
1731 | else | |
1732 | bf = ath_get_next_rx_buf(sc, &rs); | |
f078f209 | 1733 | |
b5c80475 FF |
1734 | if (!bf) |
1735 | break; | |
f078f209 | 1736 | |
f078f209 | 1737 | skb = bf->bf_mpdu; |
be0418ad | 1738 | if (!skb) |
f078f209 | 1739 | continue; |
f078f209 | 1740 | |
0d95521e FF |
1741 | /* |
1742 | * Take frame header from the first fragment and RX status from | |
1743 | * the last one. | |
1744 | */ | |
1745 | if (sc->rx.frag) | |
1746 | hdr_skb = sc->rx.frag; | |
1747 | else | |
1748 | hdr_skb = skb; | |
1749 | ||
1750 | hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); | |
1751 | rxs = IEEE80211_SKB_RXCB(hdr_skb); | |
5ca42627 | 1752 | |
29bffa96 | 1753 | ath_debug_stat_rx(sc, &rs); |
1395d3f0 | 1754 | |
f078f209 | 1755 | /* |
be0418ad S |
1756 | * If we're asked to flush receive queue, directly |
1757 | * chain it back at the queue without processing it. | |
f078f209 | 1758 | */ |
be0418ad | 1759 | if (flush) |
0d95521e | 1760 | goto requeue_drop_frag; |
f078f209 | 1761 | |
c8f3b721 JF |
1762 | retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, |
1763 | rxs, &decrypt_error); | |
1764 | if (retval) | |
0d95521e | 1765 | goto requeue_drop_frag; |
c8f3b721 | 1766 | |
a6d2055b FF |
1767 | rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; |
1768 | if (rs.rs_tstamp > tsf_lower && | |
1769 | unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) | |
1770 | rxs->mactime -= 0x100000000ULL; | |
1771 | ||
1772 | if (rs.rs_tstamp < tsf_lower && | |
1773 | unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) | |
1774 | rxs->mactime += 0x100000000ULL; | |
1775 | ||
cb71d9ba LR |
1776 | /* Ensure we always have an skb to requeue once we are done |
1777 | * processing the current buffer's skb */ | |
cc861f74 | 1778 | requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); |
cb71d9ba LR |
1779 | |
1780 | /* If there is no memory we ignore the current RX'd frame, | |
1781 | * tell hardware it can give us a new frame using the old | |
b77f483f | 1782 | * skb and put it at the tail of the sc->rx.rxbuf list for |
cb71d9ba LR |
1783 | * processing. */ |
1784 | if (!requeue_skb) | |
0d95521e | 1785 | goto requeue_drop_frag; |
f078f209 | 1786 | |
9bf9fca8 | 1787 | /* Unmap the frame */ |
7da3c55c | 1788 | dma_unmap_single(sc->dev, bf->bf_buf_addr, |
cc861f74 | 1789 | common->rx_bufsize, |
b5c80475 | 1790 | dma_type); |
f078f209 | 1791 | |
b5c80475 FF |
1792 | skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); |
1793 | if (ah->caps.rx_status_len) | |
1794 | skb_pull(skb, ah->caps.rx_status_len); | |
be0418ad | 1795 | |
0d95521e FF |
1796 | if (!rs.rs_more) |
1797 | ath9k_rx_skb_postprocess(common, hdr_skb, &rs, | |
1798 | rxs, decrypt_error); | |
be0418ad | 1799 | |
cb71d9ba LR |
1800 | /* We will now give hardware our shiny new allocated skb */ |
1801 | bf->bf_mpdu = requeue_skb; | |
7da3c55c | 1802 | bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, |
cc861f74 | 1803 | common->rx_bufsize, |
b5c80475 | 1804 | dma_type); |
7da3c55c | 1805 | if (unlikely(dma_mapping_error(sc->dev, |
f8316df1 LR |
1806 | bf->bf_buf_addr))) { |
1807 | dev_kfree_skb_any(requeue_skb); | |
1808 | bf->bf_mpdu = NULL; | |
6cf9e995 | 1809 | bf->bf_buf_addr = 0; |
3800276a | 1810 | ath_err(common, "dma_mapping_error() on RX\n"); |
7545daf4 | 1811 | ieee80211_rx(hw, skb); |
f8316df1 LR |
1812 | break; |
1813 | } | |
f078f209 | 1814 | |
0d95521e FF |
1815 | if (rs.rs_more) { |
1816 | /* | |
1817 | * rs_more indicates chained descriptors which can be | |
1818 | * used to link buffers together for a sort of | |
1819 | * scatter-gather operation. | |
1820 | */ | |
1821 | if (sc->rx.frag) { | |
1822 | /* too many fragments - cannot handle frame */ | |
1823 | dev_kfree_skb_any(sc->rx.frag); | |
1824 | dev_kfree_skb_any(skb); | |
1825 | skb = NULL; | |
1826 | } | |
1827 | sc->rx.frag = skb; | |
1828 | goto requeue; | |
1829 | } | |
1830 | ||
1831 | if (sc->rx.frag) { | |
1832 | int space = skb->len - skb_tailroom(hdr_skb); | |
1833 | ||
1834 | sc->rx.frag = NULL; | |
1835 | ||
1836 | if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { | |
1837 | dev_kfree_skb(skb); | |
1838 | goto requeue_drop_frag; | |
1839 | } | |
1840 | ||
1841 | skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len), | |
1842 | skb->len); | |
1843 | dev_kfree_skb_any(skb); | |
1844 | skb = hdr_skb; | |
1845 | } | |
1846 | ||
f078f209 LR |
1847 | /* |
1848 | * change the default rx antenna if rx diversity chooses the | |
1849 | * other antenna 3 times in a row. | |
1850 | */ | |
29bffa96 | 1851 | if (sc->rx.defant != rs.rs_antenna) { |
b77f483f | 1852 | if (++sc->rx.rxotherant >= 3) |
29bffa96 | 1853 | ath_setdefantenna(sc, rs.rs_antenna); |
f078f209 | 1854 | } else { |
b77f483f | 1855 | sc->rx.rxotherant = 0; |
f078f209 | 1856 | } |
3cbb5dd7 | 1857 | |
8ab2cd09 | 1858 | spin_lock_irqsave(&sc->sc_pm_lock, flags); |
aaef24b4 MSS |
1859 | |
1860 | if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | | |
ededf1f8 | 1861 | PS_WAIT_FOR_CAB | |
aaef24b4 | 1862 | PS_WAIT_FOR_PSPOLL_DATA)) || |
cedc7e3d | 1863 | ath9k_check_auto_sleep(sc)) |
cc65965c | 1864 | ath_rx_ps(sc, skb); |
8ab2cd09 | 1865 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); |
cc65965c | 1866 | |
102885a5 VT |
1867 | if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) |
1868 | ath_ant_comb_scan(sc, &rs); | |
1869 | ||
7545daf4 | 1870 | ieee80211_rx(hw, skb); |
cc65965c | 1871 | |
0d95521e FF |
1872 | requeue_drop_frag: |
1873 | if (sc->rx.frag) { | |
1874 | dev_kfree_skb_any(sc->rx.frag); | |
1875 | sc->rx.frag = NULL; | |
1876 | } | |
cb71d9ba | 1877 | requeue: |
b5c80475 FF |
1878 | if (edma) { |
1879 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
1880 | ath_rx_edma_buf_link(sc, qtype); | |
1881 | } else { | |
1882 | list_move_tail(&bf->list, &sc->rx.rxbuf); | |
1883 | ath_rx_buf_link(sc, bf); | |
95294973 | 1884 | ath9k_hw_rxena(ah); |
b5c80475 | 1885 | } |
be0418ad S |
1886 | } while (1); |
1887 | ||
b77f483f | 1888 | spin_unlock_bh(&sc->rx.rxbuflock); |
f078f209 LR |
1889 | |
1890 | return 0; | |
f078f209 | 1891 | } |