]>
Commit | Line | Data |
---|---|---|
f078f209 | 1 | /* |
5b68138e | 2 | * Copyright (c) 2008-2011 Atheros Communications Inc. |
f078f209 LR |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
b7f080cf | 17 | #include <linux/dma-mapping.h> |
e93d083f | 18 | #include <linux/relay.h> |
394cf0a1 | 19 | #include "ath9k.h" |
b622a720 | 20 | #include "ar9003_mac.h" |
f078f209 | 21 | |
/*
 * Stash/retrieve the owning ath_buf pointer inside an skb's control block.
 * NOTE: the argument is parenthesized so expressions such as
 * SKB_CB_ATHBUF(a ? b : c) expand correctly (macro-hygiene fix).
 */
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)(__skb)->cb))
23 | ||
ededf1f8 VT |
24 | static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) |
25 | { | |
26 | return sc->ps_enabled && | |
27 | (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); | |
28 | } | |
29 | ||
f078f209 LR |
30 | /* |
31 | * Setup and link descriptors. | |
32 | * | |
33 | * 11N: we can no longer afford to self link the last descriptor. | |
34 | * MAC acknowledges BA status as long as it copies frames to host | |
35 | * buffer (or rx fifo). This can incorrectly acknowledge packets | |
36 | * to a sender if last desc is self-linked. | |
f078f209 | 37 | */ |
f078f209 LR |
38 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) |
39 | { | |
cbe61d8a | 40 | struct ath_hw *ah = sc->sc_ah; |
cc861f74 | 41 | struct ath_common *common = ath9k_hw_common(ah); |
f078f209 LR |
42 | struct ath_desc *ds; |
43 | struct sk_buff *skb; | |
44 | ||
45 | ATH_RXBUF_RESET(bf); | |
46 | ||
47 | ds = bf->bf_desc; | |
be0418ad | 48 | ds->ds_link = 0; /* link to null */ |
f078f209 LR |
49 | ds->ds_data = bf->bf_buf_addr; |
50 | ||
be0418ad | 51 | /* virtual addr of the beginning of the buffer. */ |
f078f209 | 52 | skb = bf->bf_mpdu; |
9680e8a3 | 53 | BUG_ON(skb == NULL); |
f078f209 LR |
54 | ds->ds_vdata = skb->data; |
55 | ||
cc861f74 LR |
56 | /* |
57 | * setup rx descriptors. The rx_bufsize here tells the hardware | |
b4b6cda2 | 58 | * how much data it can DMA to us and that we are prepared |
cc861f74 LR |
59 | * to process |
60 | */ | |
b77f483f | 61 | ath9k_hw_setuprxdesc(ah, ds, |
cc861f74 | 62 | common->rx_bufsize, |
f078f209 LR |
63 | 0); |
64 | ||
b77f483f | 65 | if (sc->rx.rxlink == NULL) |
f078f209 LR |
66 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
67 | else | |
b77f483f | 68 | *sc->rx.rxlink = bf->bf_daddr; |
f078f209 | 69 | |
b77f483f | 70 | sc->rx.rxlink = &ds->ds_link; |
f078f209 LR |
71 | } |
72 | ||
ff37e337 S |
73 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
74 | { | |
75 | /* XXX block beacon interrupts */ | |
76 | ath9k_hw_setantenna(sc->sc_ah, antenna); | |
b77f483f S |
77 | sc->rx.defant = antenna; |
78 | sc->rx.rxotherant = 0; | |
ff37e337 S |
79 | } |
80 | ||
f078f209 LR |
81 | static void ath_opmode_init(struct ath_softc *sc) |
82 | { | |
cbe61d8a | 83 | struct ath_hw *ah = sc->sc_ah; |
1510718d LR |
84 | struct ath_common *common = ath9k_hw_common(ah); |
85 | ||
f078f209 LR |
86 | u32 rfilt, mfilt[2]; |
87 | ||
88 | /* configure rx filter */ | |
89 | rfilt = ath_calcrxfilter(sc); | |
90 | ath9k_hw_setrxfilter(ah, rfilt); | |
91 | ||
92 | /* configure bssid mask */ | |
364734fa | 93 | ath_hw_setbssidmask(common); |
f078f209 LR |
94 | |
95 | /* configure operational mode */ | |
96 | ath9k_hw_setopmode(ah); | |
97 | ||
f078f209 LR |
98 | /* calculate and install multicast filter */ |
99 | mfilt[0] = mfilt[1] = ~0; | |
f078f209 | 100 | ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); |
f078f209 LR |
101 | } |
102 | ||
b5c80475 FF |
103 | static bool ath_rx_edma_buf_link(struct ath_softc *sc, |
104 | enum ath9k_rx_qtype qtype) | |
f078f209 | 105 | { |
b5c80475 FF |
106 | struct ath_hw *ah = sc->sc_ah; |
107 | struct ath_rx_edma *rx_edma; | |
f078f209 LR |
108 | struct sk_buff *skb; |
109 | struct ath_buf *bf; | |
f078f209 | 110 | |
b5c80475 FF |
111 | rx_edma = &sc->rx.rx_edma[qtype]; |
112 | if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) | |
113 | return false; | |
f078f209 | 114 | |
b5c80475 FF |
115 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); |
116 | list_del_init(&bf->list); | |
f078f209 | 117 | |
b5c80475 FF |
118 | skb = bf->bf_mpdu; |
119 | ||
120 | ATH_RXBUF_RESET(bf); | |
121 | memset(skb->data, 0, ah->caps.rx_status_len); | |
122 | dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, | |
123 | ah->caps.rx_status_len, DMA_TO_DEVICE); | |
f078f209 | 124 | |
b5c80475 FF |
125 | SKB_CB_ATHBUF(skb) = bf; |
126 | ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); | |
127 | skb_queue_tail(&rx_edma->rx_fifo, skb); | |
f078f209 | 128 | |
b5c80475 FF |
129 | return true; |
130 | } | |
131 | ||
132 | static void ath_rx_addbuffer_edma(struct ath_softc *sc, | |
133 | enum ath9k_rx_qtype qtype, int size) | |
134 | { | |
b5c80475 | 135 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
6a01f0c0 | 136 | struct ath_buf *bf, *tbf; |
b5c80475 | 137 | |
b5c80475 | 138 | if (list_empty(&sc->rx.rxbuf)) { |
d2182b69 | 139 | ath_dbg(common, QUEUE, "No free rx buf available\n"); |
b5c80475 | 140 | return; |
797fe5cb | 141 | } |
f078f209 | 142 | |
6a01f0c0 | 143 | list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) |
b5c80475 FF |
144 | if (!ath_rx_edma_buf_link(sc, qtype)) |
145 | break; | |
146 | ||
b5c80475 FF |
147 | } |
148 | ||
149 | static void ath_rx_remove_buffer(struct ath_softc *sc, | |
150 | enum ath9k_rx_qtype qtype) | |
151 | { | |
152 | struct ath_buf *bf; | |
153 | struct ath_rx_edma *rx_edma; | |
154 | struct sk_buff *skb; | |
155 | ||
156 | rx_edma = &sc->rx.rx_edma[qtype]; | |
157 | ||
158 | while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { | |
159 | bf = SKB_CB_ATHBUF(skb); | |
160 | BUG_ON(!bf); | |
161 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
162 | } | |
163 | } | |
164 | ||
165 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | |
166 | { | |
ba542385 MSS |
167 | struct ath_hw *ah = sc->sc_ah; |
168 | struct ath_common *common = ath9k_hw_common(ah); | |
b5c80475 FF |
169 | struct ath_buf *bf; |
170 | ||
171 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | |
172 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | |
173 | ||
797fe5cb | 174 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
ba542385 MSS |
175 | if (bf->bf_mpdu) { |
176 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | |
177 | common->rx_bufsize, | |
178 | DMA_BIDIRECTIONAL); | |
b5c80475 | 179 | dev_kfree_skb_any(bf->bf_mpdu); |
ba542385 MSS |
180 | bf->bf_buf_addr = 0; |
181 | bf->bf_mpdu = NULL; | |
182 | } | |
b5c80475 | 183 | } |
b5c80475 FF |
184 | } |
185 | ||
186 | static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) | |
187 | { | |
188 | skb_queue_head_init(&rx_edma->rx_fifo); | |
b5c80475 FF |
189 | rx_edma->rx_fifo_hwsize = size; |
190 | } | |
191 | ||
192 | static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) | |
193 | { | |
194 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | |
195 | struct ath_hw *ah = sc->sc_ah; | |
196 | struct sk_buff *skb; | |
197 | struct ath_buf *bf; | |
198 | int error = 0, i; | |
199 | u32 size; | |
200 | ||
b5c80475 FF |
201 | ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - |
202 | ah->caps.rx_status_len); | |
203 | ||
204 | ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], | |
205 | ah->caps.rx_lp_qdepth); | |
206 | ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], | |
207 | ah->caps.rx_hp_qdepth); | |
208 | ||
209 | size = sizeof(struct ath_buf) * nbufs; | |
b81950b1 | 210 | bf = devm_kzalloc(sc->dev, size, GFP_KERNEL); |
b5c80475 FF |
211 | if (!bf) |
212 | return -ENOMEM; | |
213 | ||
214 | INIT_LIST_HEAD(&sc->rx.rxbuf); | |
b5c80475 FF |
215 | |
216 | for (i = 0; i < nbufs; i++, bf++) { | |
cc861f74 | 217 | skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); |
b5c80475 | 218 | if (!skb) { |
797fe5cb | 219 | error = -ENOMEM; |
b5c80475 | 220 | goto rx_init_fail; |
f078f209 | 221 | } |
f078f209 | 222 | |
b5c80475 | 223 | memset(skb->data, 0, common->rx_bufsize); |
797fe5cb | 224 | bf->bf_mpdu = skb; |
b5c80475 | 225 | |
797fe5cb | 226 | bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, |
cc861f74 | 227 | common->rx_bufsize, |
b5c80475 | 228 | DMA_BIDIRECTIONAL); |
797fe5cb | 229 | if (unlikely(dma_mapping_error(sc->dev, |
b5c80475 FF |
230 | bf->bf_buf_addr))) { |
231 | dev_kfree_skb_any(skb); | |
232 | bf->bf_mpdu = NULL; | |
6cf9e995 | 233 | bf->bf_buf_addr = 0; |
3800276a | 234 | ath_err(common, |
b5c80475 FF |
235 | "dma_mapping_error() on RX init\n"); |
236 | error = -ENOMEM; | |
237 | goto rx_init_fail; | |
238 | } | |
239 | ||
240 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
241 | } | |
242 | ||
243 | return 0; | |
244 | ||
245 | rx_init_fail: | |
246 | ath_rx_edma_cleanup(sc); | |
247 | return error; | |
248 | } | |
249 | ||
250 | static void ath_edma_start_recv(struct ath_softc *sc) | |
251 | { | |
b5c80475 FF |
252 | ath9k_hw_rxena(sc->sc_ah); |
253 | ||
254 | ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, | |
255 | sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize); | |
256 | ||
257 | ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP, | |
258 | sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize); | |
259 | ||
b5c80475 FF |
260 | ath_opmode_init(sc); |
261 | ||
4cb54fa3 | 262 | ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); |
b5c80475 FF |
263 | } |
264 | ||
265 | static void ath_edma_stop_recv(struct ath_softc *sc) | |
266 | { | |
b5c80475 FF |
267 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); |
268 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | |
b5c80475 FF |
269 | } |
270 | ||
271 | int ath_rx_init(struct ath_softc *sc, int nbufs) | |
272 | { | |
273 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | |
274 | struct sk_buff *skb; | |
275 | struct ath_buf *bf; | |
276 | int error = 0; | |
277 | ||
4bdd1e97 | 278 | spin_lock_init(&sc->sc_pcu_lock); |
b5c80475 | 279 | |
0d95521e FF |
280 | common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + |
281 | sc->sc_ah->caps.rx_status_len; | |
282 | ||
b5c80475 FF |
283 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { |
284 | return ath_rx_edma_init(sc, nbufs); | |
285 | } else { | |
d2182b69 | 286 | ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", |
226afe68 | 287 | common->cachelsz, common->rx_bufsize); |
b5c80475 FF |
288 | |
289 | /* Initialize rx descriptors */ | |
290 | ||
291 | error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, | |
4adfcded | 292 | "rx", nbufs, 1, 0); |
b5c80475 | 293 | if (error != 0) { |
3800276a JP |
294 | ath_err(common, |
295 | "failed to allocate rx descriptors: %d\n", | |
296 | error); | |
797fe5cb S |
297 | goto err; |
298 | } | |
b5c80475 FF |
299 | |
300 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | |
301 | skb = ath_rxbuf_alloc(common, common->rx_bufsize, | |
302 | GFP_KERNEL); | |
303 | if (skb == NULL) { | |
304 | error = -ENOMEM; | |
305 | goto err; | |
306 | } | |
307 | ||
308 | bf->bf_mpdu = skb; | |
309 | bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, | |
310 | common->rx_bufsize, | |
311 | DMA_FROM_DEVICE); | |
312 | if (unlikely(dma_mapping_error(sc->dev, | |
313 | bf->bf_buf_addr))) { | |
314 | dev_kfree_skb_any(skb); | |
315 | bf->bf_mpdu = NULL; | |
6cf9e995 | 316 | bf->bf_buf_addr = 0; |
3800276a JP |
317 | ath_err(common, |
318 | "dma_mapping_error() on RX init\n"); | |
b5c80475 FF |
319 | error = -ENOMEM; |
320 | goto err; | |
321 | } | |
b5c80475 FF |
322 | } |
323 | sc->rx.rxlink = NULL; | |
797fe5cb | 324 | } |
f078f209 | 325 | |
797fe5cb | 326 | err: |
f078f209 LR |
327 | if (error) |
328 | ath_rx_cleanup(sc); | |
329 | ||
330 | return error; | |
331 | } | |
332 | ||
f078f209 LR |
333 | void ath_rx_cleanup(struct ath_softc *sc) |
334 | { | |
cc861f74 LR |
335 | struct ath_hw *ah = sc->sc_ah; |
336 | struct ath_common *common = ath9k_hw_common(ah); | |
f078f209 LR |
337 | struct sk_buff *skb; |
338 | struct ath_buf *bf; | |
339 | ||
b5c80475 FF |
340 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { |
341 | ath_rx_edma_cleanup(sc); | |
342 | return; | |
343 | } else { | |
344 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | |
345 | skb = bf->bf_mpdu; | |
346 | if (skb) { | |
347 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | |
348 | common->rx_bufsize, | |
349 | DMA_FROM_DEVICE); | |
350 | dev_kfree_skb(skb); | |
6cf9e995 BG |
351 | bf->bf_buf_addr = 0; |
352 | bf->bf_mpdu = NULL; | |
b5c80475 | 353 | } |
051b9191 | 354 | } |
b5c80475 | 355 | } |
f078f209 LR |
356 | } |
357 | ||
358 | /* | |
359 | * Calculate the receive filter according to the | |
360 | * operating mode and state: | |
361 | * | |
362 | * o always accept unicast, broadcast, and multicast traffic | |
363 | * o maintain current state of phy error reception (the hal | |
364 | * may enable phy error frames for noise immunity work) | |
365 | * o probe request frames are accepted only when operating in | |
366 | * hostap, adhoc, or monitor modes | |
367 | * o enable promiscuous mode according to the interface state | |
368 | * o accept beacons: | |
369 | * - when operating in adhoc mode so the 802.11 layer creates | |
370 | * node table entries for peers, | |
371 | * - when operating in station mode for collecting rssi data when | |
372 | * the station is otherwise quiet, or | |
373 | * - when operating as a repeater so we see repeater-sta beacons | |
374 | * - when scanning | |
375 | */ | |
376 | ||
377 | u32 ath_calcrxfilter(struct ath_softc *sc) | |
378 | { | |
f078f209 LR |
379 | u32 rfilt; |
380 | ||
ac06697c | 381 | rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
f078f209 LR |
382 | | ATH9K_RX_FILTER_MCAST; |
383 | ||
9c1d8e4a | 384 | if (sc->rx.rxfilter & FIF_PROBE_REQ) |
f078f209 LR |
385 | rfilt |= ATH9K_RX_FILTER_PROBEREQ; |
386 | ||
217ba9da JM |
387 | /* |
388 | * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station | |
389 | * mode interface or when in monitor mode. AP mode does not need this | |
390 | * since it receives all in-BSS frames anyway. | |
391 | */ | |
2e286947 | 392 | if (sc->sc_ah->is_monitoring) |
f078f209 | 393 | rfilt |= ATH9K_RX_FILTER_PROM; |
f078f209 | 394 | |
d42c6b71 S |
395 | if (sc->rx.rxfilter & FIF_CONTROL) |
396 | rfilt |= ATH9K_RX_FILTER_CONTROL; | |
397 | ||
dbaaa147 | 398 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && |
cfda6695 | 399 | (sc->nvifs <= 1) && |
dbaaa147 VT |
400 | !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) |
401 | rfilt |= ATH9K_RX_FILTER_MYBEACON; | |
402 | else | |
f078f209 LR |
403 | rfilt |= ATH9K_RX_FILTER_BEACON; |
404 | ||
264bbec8 | 405 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || |
66afad01 | 406 | (sc->rx.rxfilter & FIF_PSPOLL)) |
dbaaa147 | 407 | rfilt |= ATH9K_RX_FILTER_PSPOLL; |
be0418ad | 408 | |
7ea310be S |
409 | if (conf_is_ht(&sc->hw->conf)) |
410 | rfilt |= ATH9K_RX_FILTER_COMP_BAR; | |
411 | ||
7545daf4 | 412 | if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { |
a549459c TW |
413 | /* This is needed for older chips */ |
414 | if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160) | |
5eb6ba83 | 415 | rfilt |= ATH9K_RX_FILTER_PROM; |
b93bce2a JM |
416 | rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; |
417 | } | |
418 | ||
b3d7aa43 GJ |
419 | if (AR_SREV_9550(sc->sc_ah)) |
420 | rfilt |= ATH9K_RX_FILTER_4ADDRESS; | |
421 | ||
f078f209 | 422 | return rfilt; |
7dcfdcd9 | 423 | |
f078f209 LR |
424 | } |
425 | ||
f078f209 LR |
426 | int ath_startrecv(struct ath_softc *sc) |
427 | { | |
cbe61d8a | 428 | struct ath_hw *ah = sc->sc_ah; |
f078f209 LR |
429 | struct ath_buf *bf, *tbf; |
430 | ||
b5c80475 FF |
431 | if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { |
432 | ath_edma_start_recv(sc); | |
433 | return 0; | |
434 | } | |
435 | ||
b77f483f | 436 | if (list_empty(&sc->rx.rxbuf)) |
f078f209 LR |
437 | goto start_recv; |
438 | ||
b77f483f S |
439 | sc->rx.rxlink = NULL; |
440 | list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { | |
f078f209 LR |
441 | ath_rx_buf_link(sc, bf); |
442 | } | |
443 | ||
444 | /* We could have deleted elements so the list may be empty now */ | |
b77f483f | 445 | if (list_empty(&sc->rx.rxbuf)) |
f078f209 LR |
446 | goto start_recv; |
447 | ||
b77f483f | 448 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); |
f078f209 | 449 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
be0418ad | 450 | ath9k_hw_rxena(ah); |
f078f209 LR |
451 | |
452 | start_recv: | |
be0418ad | 453 | ath_opmode_init(sc); |
4cb54fa3 | 454 | ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); |
be0418ad | 455 | |
f078f209 LR |
456 | return 0; |
457 | } | |
458 | ||
4b883f02 FF |
459 | static void ath_flushrecv(struct ath_softc *sc) |
460 | { | |
461 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) | |
462 | ath_rx_tasklet(sc, 1, true); | |
463 | ath_rx_tasklet(sc, 1, false); | |
464 | } | |
465 | ||
f078f209 LR |
466 | bool ath_stoprecv(struct ath_softc *sc) |
467 | { | |
cbe61d8a | 468 | struct ath_hw *ah = sc->sc_ah; |
5882da02 | 469 | bool stopped, reset = false; |
f078f209 | 470 | |
d47844a0 | 471 | ath9k_hw_abortpcurecv(ah); |
be0418ad | 472 | ath9k_hw_setrxfilter(ah, 0); |
5882da02 | 473 | stopped = ath9k_hw_stopdmarecv(ah, &reset); |
b5c80475 | 474 | |
4b883f02 FF |
475 | ath_flushrecv(sc); |
476 | ||
b5c80475 FF |
477 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) |
478 | ath_edma_stop_recv(sc); | |
479 | else | |
480 | sc->rx.rxlink = NULL; | |
be0418ad | 481 | |
d584747b RM |
482 | if (!(ah->ah_flags & AH_UNPLUGGED) && |
483 | unlikely(!stopped)) { | |
d7fd1b50 BG |
484 | ath_err(ath9k_hw_common(sc->sc_ah), |
485 | "Could not stop RX, we could be " | |
486 | "confusing the DMA engine when we start RX up\n"); | |
487 | ATH_DBG_WARN_ON_ONCE(!stopped); | |
488 | } | |
2232d31b | 489 | return stopped && !reset; |
f078f209 LR |
490 | } |
491 | ||
cc65965c JM |
492 | static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) |
493 | { | |
494 | /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ | |
495 | struct ieee80211_mgmt *mgmt; | |
496 | u8 *pos, *end, id, elen; | |
497 | struct ieee80211_tim_ie *tim; | |
498 | ||
499 | mgmt = (struct ieee80211_mgmt *)skb->data; | |
500 | pos = mgmt->u.beacon.variable; | |
501 | end = skb->data + skb->len; | |
502 | ||
503 | while (pos + 2 < end) { | |
504 | id = *pos++; | |
505 | elen = *pos++; | |
506 | if (pos + elen > end) | |
507 | break; | |
508 | ||
509 | if (id == WLAN_EID_TIM) { | |
510 | if (elen < sizeof(*tim)) | |
511 | break; | |
512 | tim = (struct ieee80211_tim_ie *) pos; | |
513 | if (tim->dtim_count != 0) | |
514 | break; | |
515 | return tim->bitmap_ctrl & 0x01; | |
516 | } | |
517 | ||
518 | pos += elen; | |
519 | } | |
520 | ||
521 | return false; | |
522 | } | |
523 | ||
cc65965c JM |
524 | static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) |
525 | { | |
1510718d | 526 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
cc65965c JM |
527 | |
528 | if (skb->len < 24 + 8 + 2 + 2) | |
529 | return; | |
530 | ||
1b04b930 | 531 | sc->ps_flags &= ~PS_WAIT_FOR_BEACON; |
293dc5df | 532 | |
1b04b930 S |
533 | if (sc->ps_flags & PS_BEACON_SYNC) { |
534 | sc->ps_flags &= ~PS_BEACON_SYNC; | |
d2182b69 | 535 | ath_dbg(common, PS, |
226afe68 | 536 | "Reconfigure Beacon timers based on timestamp from the AP\n"); |
ef4ad633 | 537 | ath9k_set_beacon(sc); |
ccdfeab6 JM |
538 | } |
539 | ||
cc65965c JM |
540 | if (ath_beacon_dtim_pending_cab(skb)) { |
541 | /* | |
542 | * Remain awake waiting for buffered broadcast/multicast | |
58f5fffd GJ |
543 | * frames. If the last broadcast/multicast frame is not |
544 | * received properly, the next beacon frame will work as | |
545 | * a backup trigger for returning into NETWORK SLEEP state, | |
546 | * so we are waiting for it as well. | |
cc65965c | 547 | */ |
d2182b69 | 548 | ath_dbg(common, PS, |
226afe68 | 549 | "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); |
1b04b930 | 550 | sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; |
cc65965c JM |
551 | return; |
552 | } | |
553 | ||
1b04b930 | 554 | if (sc->ps_flags & PS_WAIT_FOR_CAB) { |
cc65965c JM |
555 | /* |
556 | * This can happen if a broadcast frame is dropped or the AP | |
557 | * fails to send a frame indicating that all CAB frames have | |
558 | * been delivered. | |
559 | */ | |
1b04b930 | 560 | sc->ps_flags &= ~PS_WAIT_FOR_CAB; |
d2182b69 | 561 | ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); |
cc65965c | 562 | } |
cc65965c JM |
563 | } |
564 | ||
f73c604c | 565 | static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) |
cc65965c JM |
566 | { |
567 | struct ieee80211_hdr *hdr; | |
c46917bb | 568 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
cc65965c JM |
569 | |
570 | hdr = (struct ieee80211_hdr *)skb->data; | |
571 | ||
572 | /* Process Beacon and CAB receive in PS state */ | |
ededf1f8 | 573 | if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) |
07c15a3f | 574 | && mybeacon) { |
cc65965c | 575 | ath_rx_ps_beacon(sc, skb); |
07c15a3f SM |
576 | } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && |
577 | (ieee80211_is_data(hdr->frame_control) || | |
578 | ieee80211_is_action(hdr->frame_control)) && | |
579 | is_multicast_ether_addr(hdr->addr1) && | |
580 | !ieee80211_has_moredata(hdr->frame_control)) { | |
cc65965c JM |
581 | /* |
582 | * No more broadcast/multicast frames to be received at this | |
583 | * point. | |
584 | */ | |
3fac6dfd | 585 | sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); |
d2182b69 | 586 | ath_dbg(common, PS, |
226afe68 | 587 | "All PS CAB frames received, back to sleep\n"); |
1b04b930 | 588 | } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && |
9a23f9ca JM |
589 | !is_multicast_ether_addr(hdr->addr1) && |
590 | !ieee80211_has_morefrags(hdr->frame_control)) { | |
1b04b930 | 591 | sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; |
d2182b69 | 592 | ath_dbg(common, PS, |
226afe68 | 593 | "Going back to sleep after having received PS-Poll data (0x%lx)\n", |
1b04b930 S |
594 | sc->ps_flags & (PS_WAIT_FOR_BEACON | |
595 | PS_WAIT_FOR_CAB | | |
596 | PS_WAIT_FOR_PSPOLL_DATA | | |
597 | PS_WAIT_FOR_TX_ACK)); | |
cc65965c JM |
598 | } |
599 | } | |
600 | ||
b5c80475 | 601 | static bool ath_edma_get_buffers(struct ath_softc *sc, |
3a2923e8 FF |
602 | enum ath9k_rx_qtype qtype, |
603 | struct ath_rx_status *rs, | |
604 | struct ath_buf **dest) | |
f078f209 | 605 | { |
b5c80475 FF |
606 | struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; |
607 | struct ath_hw *ah = sc->sc_ah; | |
608 | struct ath_common *common = ath9k_hw_common(ah); | |
609 | struct sk_buff *skb; | |
610 | struct ath_buf *bf; | |
611 | int ret; | |
612 | ||
613 | skb = skb_peek(&rx_edma->rx_fifo); | |
614 | if (!skb) | |
615 | return false; | |
616 | ||
617 | bf = SKB_CB_ATHBUF(skb); | |
618 | BUG_ON(!bf); | |
619 | ||
ce9426d1 | 620 | dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, |
b5c80475 FF |
621 | common->rx_bufsize, DMA_FROM_DEVICE); |
622 | ||
3a2923e8 | 623 | ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data); |
ce9426d1 ML |
624 | if (ret == -EINPROGRESS) { |
625 | /*let device gain the buffer again*/ | |
626 | dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, | |
627 | common->rx_bufsize, DMA_FROM_DEVICE); | |
b5c80475 | 628 | return false; |
ce9426d1 | 629 | } |
b5c80475 FF |
630 | |
631 | __skb_unlink(skb, &rx_edma->rx_fifo); | |
632 | if (ret == -EINVAL) { | |
633 | /* corrupt descriptor, skip this one and the following one */ | |
634 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
635 | ath_rx_edma_buf_link(sc, qtype); | |
b5c80475 | 636 | |
3a2923e8 FF |
637 | skb = skb_peek(&rx_edma->rx_fifo); |
638 | if (skb) { | |
639 | bf = SKB_CB_ATHBUF(skb); | |
640 | BUG_ON(!bf); | |
641 | ||
642 | __skb_unlink(skb, &rx_edma->rx_fifo); | |
643 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
644 | ath_rx_edma_buf_link(sc, qtype); | |
3a2923e8 | 645 | } |
6bb51c70 TH |
646 | |
647 | bf = NULL; | |
b5c80475 | 648 | } |
b5c80475 | 649 | |
3a2923e8 | 650 | *dest = bf; |
b5c80475 FF |
651 | return true; |
652 | } | |
f078f209 | 653 | |
b5c80475 FF |
654 | static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, |
655 | struct ath_rx_status *rs, | |
656 | enum ath9k_rx_qtype qtype) | |
657 | { | |
3a2923e8 | 658 | struct ath_buf *bf = NULL; |
b5c80475 | 659 | |
3a2923e8 FF |
660 | while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { |
661 | if (!bf) | |
662 | continue; | |
b5c80475 | 663 | |
3a2923e8 FF |
664 | return bf; |
665 | } | |
666 | return NULL; | |
b5c80475 FF |
667 | } |
668 | ||
669 | static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, | |
670 | struct ath_rx_status *rs) | |
671 | { | |
672 | struct ath_hw *ah = sc->sc_ah; | |
673 | struct ath_common *common = ath9k_hw_common(ah); | |
f078f209 | 674 | struct ath_desc *ds; |
b5c80475 FF |
675 | struct ath_buf *bf; |
676 | int ret; | |
677 | ||
678 | if (list_empty(&sc->rx.rxbuf)) { | |
679 | sc->rx.rxlink = NULL; | |
680 | return NULL; | |
681 | } | |
682 | ||
683 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); | |
684 | ds = bf->bf_desc; | |
685 | ||
686 | /* | |
687 | * Must provide the virtual address of the current | |
688 | * descriptor, the physical address, and the virtual | |
689 | * address of the next descriptor in the h/w chain. | |
690 | * This allows the HAL to look ahead to see if the | |
691 | * hardware is done with a descriptor by checking the | |
692 | * done bit in the following descriptor and the address | |
693 | * of the current descriptor the DMA engine is working | |
694 | * on. All this is necessary because of our use of | |
695 | * a self-linked list to avoid rx overruns. | |
696 | */ | |
3de21116 | 697 | ret = ath9k_hw_rxprocdesc(ah, ds, rs); |
b5c80475 FF |
698 | if (ret == -EINPROGRESS) { |
699 | struct ath_rx_status trs; | |
700 | struct ath_buf *tbf; | |
701 | struct ath_desc *tds; | |
702 | ||
703 | memset(&trs, 0, sizeof(trs)); | |
704 | if (list_is_last(&bf->list, &sc->rx.rxbuf)) { | |
705 | sc->rx.rxlink = NULL; | |
706 | return NULL; | |
707 | } | |
708 | ||
709 | tbf = list_entry(bf->list.next, struct ath_buf, list); | |
710 | ||
711 | /* | |
712 | * On some hardware the descriptor status words could | |
713 | * get corrupted, including the done bit. Because of | |
714 | * this, check if the next descriptor's done bit is | |
715 | * set or not. | |
716 | * | |
717 | * If the next descriptor's done bit is set, the current | |
718 | * descriptor has been corrupted. Force s/w to discard | |
719 | * this descriptor and continue... | |
720 | */ | |
721 | ||
722 | tds = tbf->bf_desc; | |
3de21116 | 723 | ret = ath9k_hw_rxprocdesc(ah, tds, &trs); |
b5c80475 FF |
724 | if (ret == -EINPROGRESS) |
725 | return NULL; | |
726 | } | |
727 | ||
a3dc48e8 | 728 | list_del(&bf->list); |
b5c80475 FF |
729 | if (!bf->bf_mpdu) |
730 | return bf; | |
731 | ||
732 | /* | |
733 | * Synchronize the DMA transfer with CPU before | |
734 | * 1. accessing the frame | |
735 | * 2. requeueing the same buffer to h/w | |
736 | */ | |
ce9426d1 | 737 | dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, |
b5c80475 FF |
738 | common->rx_bufsize, |
739 | DMA_FROM_DEVICE); | |
740 | ||
741 | return bf; | |
742 | } | |
743 | ||
d435700f S |
744 | /* Assumes you've already done the endian to CPU conversion */ |
745 | static bool ath9k_rx_accept(struct ath_common *common, | |
9f167f64 | 746 | struct ieee80211_hdr *hdr, |
d435700f S |
747 | struct ieee80211_rx_status *rxs, |
748 | struct ath_rx_status *rx_stats, | |
749 | bool *decrypt_error) | |
750 | { | |
ec205999 | 751 | struct ath_softc *sc = (struct ath_softc *) common->priv; |
66760eac | 752 | bool is_mc, is_valid_tkip, strip_mic, mic_error; |
d435700f | 753 | struct ath_hw *ah = common->ah; |
d435700f | 754 | __le16 fc; |
b7b1b512 | 755 | u8 rx_status_len = ah->caps.rx_status_len; |
d435700f | 756 | |
d435700f S |
757 | fc = hdr->frame_control; |
758 | ||
66760eac FF |
759 | is_mc = !!is_multicast_ether_addr(hdr->addr1); |
760 | is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && | |
761 | test_bit(rx_stats->rs_keyix, common->tkip_keymap); | |
152e585d | 762 | strip_mic = is_valid_tkip && ieee80211_is_data(fc) && |
2a5783b8 | 763 | ieee80211_has_protected(fc) && |
152e585d | 764 | !(rx_stats->rs_status & |
846d9363 FF |
765 | (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | |
766 | ATH9K_RXERR_KEYMISS)); | |
66760eac | 767 | |
f88373fa FF |
768 | /* |
769 | * Key miss events are only relevant for pairwise keys where the | |
770 | * descriptor does contain a valid key index. This has been observed | |
771 | * mostly with CCMP encryption. | |
772 | */ | |
bed3d9c0 FF |
773 | if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID || |
774 | !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) | |
f88373fa FF |
775 | rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; |
776 | ||
15072189 BG |
777 | if (!rx_stats->rs_datalen) { |
778 | RX_STAT_INC(rx_len_err); | |
d435700f | 779 | return false; |
15072189 BG |
780 | } |
781 | ||
d435700f S |
782 | /* |
783 | * rs_status follows rs_datalen so if rs_datalen is too large | |
784 | * we can take a hint that hardware corrupted it, so ignore | |
785 | * those frames. | |
786 | */ | |
15072189 BG |
787 | if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) { |
788 | RX_STAT_INC(rx_len_err); | |
d435700f | 789 | return false; |
15072189 | 790 | } |
d435700f | 791 | |
0d95521e | 792 | /* Only use error bits from the last fragment */ |
d435700f | 793 | if (rx_stats->rs_more) |
0d95521e | 794 | return true; |
d435700f | 795 | |
66760eac FF |
796 | mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && |
797 | !ieee80211_has_morefrags(fc) && | |
798 | !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && | |
799 | (rx_stats->rs_status & ATH9K_RXERR_MIC); | |
800 | ||
d435700f S |
801 | /* |
802 | * The rx_stats->rs_status will not be set until the end of the | |
803 | * chained descriptors so it can be ignored if rs_more is set. The | |
804 | * rs_more will be false at the last element of the chained | |
805 | * descriptors. | |
806 | */ | |
807 | if (rx_stats->rs_status != 0) { | |
846d9363 FF |
808 | u8 status_mask; |
809 | ||
66760eac | 810 | if (rx_stats->rs_status & ATH9K_RXERR_CRC) { |
d435700f | 811 | rxs->flag |= RX_FLAG_FAILED_FCS_CRC; |
66760eac FF |
812 | mic_error = false; |
813 | } | |
d435700f S |
814 | if (rx_stats->rs_status & ATH9K_RXERR_PHY) |
815 | return false; | |
816 | ||
846d9363 FF |
817 | if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || |
818 | (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { | |
d435700f | 819 | *decrypt_error = true; |
66760eac | 820 | mic_error = false; |
d435700f | 821 | } |
66760eac | 822 | |
d435700f S |
823 | /* |
824 | * Reject error frames with the exception of | |
825 | * decryption and MIC failures. For monitor mode, | |
826 | * we also ignore the CRC error. | |
827 | */ | |
846d9363 FF |
828 | status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | |
829 | ATH9K_RXERR_KEYMISS; | |
830 | ||
ec205999 | 831 | if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL)) |
846d9363 FF |
832 | status_mask |= ATH9K_RXERR_CRC; |
833 | ||
834 | if (rx_stats->rs_status & ~status_mask) | |
835 | return false; | |
d435700f | 836 | } |
66760eac FF |
837 | |
838 | /* | |
839 | * For unicast frames the MIC error bit can have false positives, | |
840 | * so all MIC error reports need to be validated in software. | |
841 | * False negatives are not common, so skip software verification | |
842 | * if the hardware considers the MIC valid. | |
843 | */ | |
844 | if (strip_mic) | |
845 | rxs->flag |= RX_FLAG_MMIC_STRIPPED; | |
846 | else if (is_mc && mic_error) | |
847 | rxs->flag |= RX_FLAG_MMIC_ERROR; | |
848 | ||
d435700f S |
849 | return true; |
850 | } | |
851 | ||
852 | static int ath9k_process_rate(struct ath_common *common, | |
853 | struct ieee80211_hw *hw, | |
854 | struct ath_rx_status *rx_stats, | |
9f167f64 | 855 | struct ieee80211_rx_status *rxs) |
d435700f S |
856 | { |
857 | struct ieee80211_supported_band *sband; | |
858 | enum ieee80211_band band; | |
859 | unsigned int i = 0; | |
990e08a0 | 860 | struct ath_softc __maybe_unused *sc = common->priv; |
d435700f S |
861 | |
862 | band = hw->conf.channel->band; | |
863 | sband = hw->wiphy->bands[band]; | |
864 | ||
865 | if (rx_stats->rs_rate & 0x80) { | |
866 | /* HT rate */ | |
867 | rxs->flag |= RX_FLAG_HT; | |
868 | if (rx_stats->rs_flags & ATH9K_RX_2040) | |
869 | rxs->flag |= RX_FLAG_40MHZ; | |
870 | if (rx_stats->rs_flags & ATH9K_RX_GI) | |
871 | rxs->flag |= RX_FLAG_SHORT_GI; | |
872 | rxs->rate_idx = rx_stats->rs_rate & 0x7f; | |
873 | return 0; | |
874 | } | |
875 | ||
876 | for (i = 0; i < sband->n_bitrates; i++) { | |
877 | if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { | |
878 | rxs->rate_idx = i; | |
879 | return 0; | |
880 | } | |
881 | if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { | |
882 | rxs->flag |= RX_FLAG_SHORTPRE; | |
883 | rxs->rate_idx = i; | |
884 | return 0; | |
885 | } | |
886 | } | |
887 | ||
888 | /* | |
889 | * No valid hardware bitrate found -- we should not get here | |
890 | * because hardware has already validated this frame as OK. | |
891 | */ | |
d2182b69 | 892 | ath_dbg(common, ANY, |
226afe68 JP |
893 | "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", |
894 | rx_stats->rs_rate); | |
15072189 | 895 | RX_STAT_INC(rx_rate_err); |
d435700f S |
896 | return -EINVAL; |
897 | } | |
898 | ||
899 | static void ath9k_process_rssi(struct ath_common *common, | |
900 | struct ieee80211_hw *hw, | |
9f167f64 | 901 | struct ieee80211_hdr *hdr, |
d435700f S |
902 | struct ath_rx_status *rx_stats) |
903 | { | |
9ac58615 | 904 | struct ath_softc *sc = hw->priv; |
d435700f | 905 | struct ath_hw *ah = common->ah; |
9fa23e17 | 906 | int last_rssi; |
2ef16755 | 907 | int rssi = rx_stats->rs_rssi; |
d435700f | 908 | |
cf3af748 RM |
909 | if (!rx_stats->is_mybeacon || |
910 | ((ah->opmode != NL80211_IFTYPE_STATION) && | |
911 | (ah->opmode != NL80211_IFTYPE_ADHOC))) | |
9fa23e17 FF |
912 | return; |
913 | ||
9fa23e17 | 914 | if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) |
9ac58615 | 915 | ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); |
d435700f | 916 | |
9ac58615 | 917 | last_rssi = sc->last_rssi; |
d435700f | 918 | if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) |
2ef16755 FF |
919 | rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); |
920 | if (rssi < 0) | |
921 | rssi = 0; | |
d435700f S |
922 | |
923 | /* Update Beacon RSSI, this is used by ANI. */ | |
2ef16755 | 924 | ah->stats.avgbrssi = rssi; |
d435700f S |
925 | } |
926 | ||
927 | /* | |
928 | * For Decrypt or Demic errors, we only mark packet status here and always push | |
929 | * up the frame up to let mac80211 handle the actual error case, be it no | |
930 | * decryption key or real decryption error. This let us keep statistics there. | |
931 | */ | |
/*
 * Validate a received frame and fill in the mac80211 RX status.
 *
 * Returns 0 when the frame should be passed up, -EINVAL when it must
 * be dropped. On return, *decrypt_error is true when the hardware
 * reported a decryption problem for this frame (set by
 * ath9k_rx_accept() on ATH9K_RXERR_DECRYPT / unicast KEYMISS).
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/* Feed beacon RSSI into the ANI averaging filter. */
	ath9k_process_rssi(common, hw, hdr, rx_stats);

	/* Map the hardware rate code onto a mac80211 rate index. */
	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	/* Reported signal = calibrated noise floor + hardware RSSI. */
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;
	/* Signal is only meaningful on the last subframe of an A-MPDU. */
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}
967 | ||
968 | static void ath9k_rx_skb_postprocess(struct ath_common *common, | |
969 | struct sk_buff *skb, | |
970 | struct ath_rx_status *rx_stats, | |
971 | struct ieee80211_rx_status *rxs, | |
972 | bool decrypt_error) | |
973 | { | |
974 | struct ath_hw *ah = common->ah; | |
975 | struct ieee80211_hdr *hdr; | |
976 | int hdrlen, padpos, padsize; | |
977 | u8 keyix; | |
978 | __le16 fc; | |
979 | ||
980 | /* see if any padding is done by the hw and remove it */ | |
981 | hdr = (struct ieee80211_hdr *) skb->data; | |
982 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | |
983 | fc = hdr->frame_control; | |
984 | padpos = ath9k_cmn_padpos(hdr->frame_control); | |
985 | ||
986 | /* The MAC header is padded to have 32-bit boundary if the | |
987 | * packet payload is non-zero. The general calculation for | |
988 | * padsize would take into account odd header lengths: | |
989 | * padsize = (4 - padpos % 4) % 4; However, since only | |
990 | * even-length headers are used, padding can only be 0 or 2 | |
991 | * bytes and we can optimize this a bit. In addition, we must | |
992 | * not try to remove padding from short control frames that do | |
993 | * not have payload. */ | |
994 | padsize = padpos & 3; | |
995 | if (padsize && skb->len>=padpos+padsize+FCS_LEN) { | |
996 | memmove(skb->data + padsize, skb->data, padpos); | |
997 | skb_pull(skb, padsize); | |
998 | } | |
999 | ||
1000 | keyix = rx_stats->rs_keyix; | |
1001 | ||
1002 | if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && | |
1003 | ieee80211_has_protected(fc)) { | |
1004 | rxs->flag |= RX_FLAG_DECRYPTED; | |
1005 | } else if (ieee80211_has_protected(fc) | |
1006 | && !decrypt_error && skb->len >= hdrlen + 4) { | |
1007 | keyix = skb->data[hdrlen + 3] >> 6; | |
1008 | ||
1009 | if (test_bit(keyix, common->keymap)) | |
1010 | rxs->flag |= RX_FLAG_DECRYPTED; | |
1011 | } | |
1012 | if (ah->sw_mgmt_crypto && | |
1013 | (rxs->flag & RX_FLAG_DECRYPTED) && | |
1014 | ieee80211_is_mgmt(fc)) | |
1015 | /* Use software decrypt for management frames. */ | |
1016 | rxs->flag &= ~RX_FLAG_DECRYPTED; | |
1017 | } | |
b5c80475 | 1018 | |
e93d083f SW |
1019 | static s8 fix_rssi_inv_only(u8 rssi_val) |
1020 | { | |
1021 | if (rssi_val == 128) | |
1022 | rssi_val = 0; | |
1023 | return (s8) rssi_val; | |
1024 | } | |
1025 | ||
9b99e665 SW |
1026 | /* returns 1 if this was a spectral frame, even if not handled. */ |
1027 | static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr, | |
1028 | struct ath_rx_status *rs, u64 tsf) | |
e93d083f SW |
1029 | { |
1030 | #ifdef CONFIG_ATH_DEBUG | |
1031 | struct ath_hw *ah = sc->sc_ah; | |
1032 | u8 bins[SPECTRAL_HT20_NUM_BINS]; | |
1033 | u8 *vdata = (u8 *)hdr; | |
1034 | struct fft_sample_ht20 fft_sample; | |
1035 | struct ath_radar_info *radar_info; | |
1036 | struct ath_ht20_mag_info *mag_info; | |
1037 | int len = rs->rs_datalen; | |
4ab0b0aa | 1038 | int dc_pos; |
e93d083f SW |
1039 | |
1040 | /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer | |
1041 | * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT | |
1042 | * yet, but this is supposed to be possible as well. | |
1043 | */ | |
1044 | if (rs->rs_phyerr != ATH9K_PHYERR_RADAR && | |
1045 | rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT && | |
1046 | rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL) | |
9b99e665 SW |
1047 | return 0; |
1048 | ||
1049 | /* check if spectral scan bit is set. This does not have to be checked | |
1050 | * if received through a SPECTRAL phy error, but shouldn't hurt. | |
1051 | */ | |
1052 | radar_info = ((struct ath_radar_info *)&vdata[len]) - 1; | |
1053 | if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK)) | |
1054 | return 0; | |
e93d083f SW |
1055 | |
1056 | /* Variation in the data length is possible and will be fixed later. | |
1057 | * Note that we only support HT20 for now. | |
1058 | * | |
1059 | * TODO: add HT20_40 support as well. | |
1060 | */ | |
1061 | if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) || | |
1062 | (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1)) | |
9b99e665 | 1063 | return 1; |
e93d083f SW |
1064 | |
1065 | fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20; | |
1066 | fft_sample.tlv.length = sizeof(fft_sample) - sizeof(fft_sample.tlv); | |
4ab0b0aa | 1067 | fft_sample.tlv.length = __cpu_to_be16(fft_sample.tlv.length); |
e93d083f | 1068 | |
4ab0b0aa | 1069 | fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq); |
e93d083f SW |
1070 | fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0); |
1071 | fft_sample.noise = ah->noise; | |
1072 | ||
1073 | switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) { | |
1074 | case 0: | |
1075 | /* length correct, nothing to do. */ | |
1076 | memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS); | |
1077 | break; | |
1078 | case -1: | |
1079 | /* first byte missing, duplicate it. */ | |
1080 | memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1); | |
1081 | bins[0] = vdata[0]; | |
1082 | break; | |
1083 | case 2: | |
1084 | /* MAC added 2 extra bytes at bin 30 and 32, remove them. */ | |
1085 | memcpy(bins, vdata, 30); | |
1086 | bins[30] = vdata[31]; | |
1087 | memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31); | |
1088 | break; | |
1089 | case 1: | |
1090 | /* MAC added 2 extra bytes AND first byte is missing. */ | |
1091 | bins[0] = vdata[0]; | |
1092 | memcpy(&bins[0], vdata, 30); | |
1093 | bins[31] = vdata[31]; | |
1094 | memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32); | |
1095 | break; | |
1096 | default: | |
9b99e665 | 1097 | return 1; |
e93d083f SW |
1098 | } |
1099 | ||
1100 | /* DC value (value in the middle) is the blind spot of the spectral | |
1101 | * sample and invalid, interpolate it. | |
1102 | */ | |
1103 | dc_pos = SPECTRAL_HT20_NUM_BINS / 2; | |
1104 | bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2; | |
1105 | ||
1106 | /* mag data is at the end of the frame, in front of radar_info */ | |
1107 | mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1; | |
1108 | ||
4ab0b0aa SE |
1109 | /* copy raw bins without scaling them */ |
1110 | memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS); | |
1111 | fft_sample.max_exp = mag_info->max_exp & 0xf; | |
e93d083f SW |
1112 | |
1113 | fft_sample.max_magnitude = spectral_max_magnitude(mag_info->all_bins); | |
4ab0b0aa | 1114 | fft_sample.max_magnitude = __cpu_to_be16(fft_sample.max_magnitude); |
e93d083f SW |
1115 | fft_sample.max_index = spectral_max_index(mag_info->all_bins); |
1116 | fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins); | |
4ab0b0aa | 1117 | fft_sample.tsf = __cpu_to_be64(tsf); |
e93d083f SW |
1118 | |
1119 | ath_debug_send_fft_sample(sc, &fft_sample.tlv); | |
9b99e665 SW |
1120 | return 1; |
1121 | #else | |
1122 | return 0; | |
e93d083f SW |
1123 | #endif |
1124 | } | |
1125 | ||
b5c80475 FF |
/*
 * RX tasklet: drain completed buffers from the RX queue, hand good
 * frames up to mac80211 and re-arm each buffer for further reception.
 *
 * @flush: when non-zero, buffers are only returned to sc->rx.rxbuf
 *         without being re-linked to the hardware.
 * @hp:    on EDMA chips, selects the high-priority RX queue.
 *
 * Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	/* EDMA buffers are written by both host and hardware. */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	/* Snapshot the 64-bit TSF once; used below to extend the 32-bit
	 * per-frame timestamp to a full 64-bit mactime. */
	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		/* No more completed buffers: done. */
		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    ether_addr_equal(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		}
		else
			rs.is_mybeacon = false;

		if (ieee80211_is_data_present(hdr->frame_control) &&
		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
			sc->rx.num_pkts++;

		ath_debug_stat_rx(sc, &rs);

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		/* Extend the hardware's 32-bit timestamp to 64 bits using
		 * the TSF snapshot, compensating for wraparound in either
		 * direction between the frame and the snapshot. */
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* PHY-error frames may carry spectral scan data; if so they
		 * are consumed here and never reach mac80211. */
		if (rs.rs_status & ATH9K_RXERR_PHY) {
			if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
				RX_STAT_INC(rx_spectral);
				goto requeue_drop_frag;
			}
		}

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}
		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		/* Only the last fragment carries valid status. */
		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
			  bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			/* Deliver the frame anyway; the buffer is dead. */
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		/* Final fragment: append this skb's data to the stored
		 * first fragment and continue with the merged frame. */
		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}


		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {

			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}

		}

		/* Drop the 8-byte Michael MIC trailer the hardware left
		 * in place (preprocess set MMIC_STRIPPED). */
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		/* Abandon any partially assembled fragment chain. */
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_link(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	/* Re-enable RXEOL/RXORN interrupts if they were masked off. */
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}