]>
Commit | Line | Data |
---|---|---|
f078f209 | 1 | /* |
5b68138e | 2 | * Copyright (c) 2008-2011 Atheros Communications Inc. |
f078f209 LR |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
b7f080cf | 17 | #include <linux/dma-mapping.h> |
394cf0a1 | 18 | #include "ath9k.h" |
b622a720 | 19 | #include "ar9003_mac.h" |
f078f209 | 20 | |
/* Stash/retrieve the owning ath_rxbuf in an RX skb's control block (cb) */
#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
b5c80475 | 22 | |
ededf1f8 VT |
23 | static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) |
24 | { | |
25 | return sc->ps_enabled && | |
26 | (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); | |
27 | } | |
28 | ||
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 *
 * @sc:    driver state
 * @bf:    RX buffer whose descriptor is (re)initialized and appended
 * @flush: when true and no chain exists yet, do NOT hand the buffer
 *         address to the hardware (flush-only relink)
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	/*
	 * Append to an existing chain via the previous descriptor's link
	 * word, or (unless flushing) point the hardware at this buffer.
	 */
	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);

	/* The next buffer links itself in through this descriptor. */
	sc->rx.rxlink = &ds->ds_link;
}
70 | ||
7dd74f5f FF |
71 | static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf, |
72 | bool flush) | |
e96542e5 FF |
73 | { |
74 | if (sc->rx.buf_hold) | |
7dd74f5f | 75 | ath_rx_buf_link(sc, sc->rx.buf_hold, flush); |
e96542e5 FF |
76 | |
77 | sc->rx.buf_hold = bf; | |
78 | } | |
79 | ||
ff37e337 S |
80 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
81 | { | |
82 | /* XXX block beacon interrupts */ | |
83 | ath9k_hw_setantenna(sc->sc_ah, antenna); | |
b77f483f S |
84 | sc->rx.defant = antenna; |
85 | sc->rx.rxotherant = 0; | |
ff37e337 S |
86 | } |
87 | ||
f078f209 LR |
88 | static void ath_opmode_init(struct ath_softc *sc) |
89 | { | |
cbe61d8a | 90 | struct ath_hw *ah = sc->sc_ah; |
1510718d LR |
91 | struct ath_common *common = ath9k_hw_common(ah); |
92 | ||
f078f209 LR |
93 | u32 rfilt, mfilt[2]; |
94 | ||
95 | /* configure rx filter */ | |
96 | rfilt = ath_calcrxfilter(sc); | |
97 | ath9k_hw_setrxfilter(ah, rfilt); | |
98 | ||
99 | /* configure bssid mask */ | |
364734fa | 100 | ath_hw_setbssidmask(common); |
f078f209 LR |
101 | |
102 | /* configure operational mode */ | |
103 | ath9k_hw_setopmode(ah); | |
104 | ||
f078f209 LR |
105 | /* calculate and install multicast filter */ |
106 | mfilt[0] = mfilt[1] = ~0; | |
f078f209 | 107 | ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); |
f078f209 LR |
108 | } |
109 | ||
/*
 * Move one free RX buffer onto the EDMA RX FIFO of @qtype.
 *
 * Returns false when the hardware FIFO is already full (nothing queued),
 * true after the buffer has been handed to the hardware.
 */
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	/*
	 * Clear the status area so a stale "done" word is never seen, and
	 * flush that range back to the device before handing it over.
	 */
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	/* Remember the owning ath_rxbuf in the skb's control block. */
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
137 | ||
/*
 * Refill the EDMA RX FIFO of @qtype from the free-buffer list until
 * either the hardware FIFO is full or the list runs empty.
 */
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_rxbuf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	/* _safe variant: ath_rx_edma_buf_link() unlinks entries as it goes */
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

}
154 | ||
/*
 * Drain every skb from the EDMA RX FIFO of @qtype and put the owning
 * buffers back on the free list (sc->rx.rxbuf).
 */
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		/* The owning ath_rxbuf was stashed in skb->cb when queued */
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
170 | ||
/*
 * Tear down EDMA RX state: pull all buffers back from both hardware
 * FIFOs, then unmap and free every skb on the free list.
 */
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_rxbuf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			/* Unmap before freeing; then clear both handles so a
			 * repeated cleanup cannot double-free/double-unmap. */
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
191 | ||
192 | static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) | |
193 | { | |
5d07cca2 | 194 | __skb_queue_head_init(&rx_edma->rx_fifo); |
b5c80475 FF |
195 | rx_edma->rx_fifo_hwsize = size; |
196 | } | |
197 | ||
/*
 * Allocate and map RX buffers for EDMA-capable (AR93xx+) chips.
 * @sc:    driver state
 * @nbufs: number of RX buffers to allocate
 *
 * The ath_rxbuf array is devm-allocated (freed with the device); skbs
 * and their DMA mappings are released via ath_rx_edma_cleanup() on any
 * mid-loop failure. Returns 0 or a negative errno.
 */
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0, i;
	u32 size;

	/* The hardware sees the payload size without the status area. */
	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_rxbuf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		/* Zero so stale data can never look like a valid RX status */
		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		/* BIDIRECTIONAL: device writes frames, host writes status */
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
255 | ||
256 | static void ath_edma_start_recv(struct ath_softc *sc) | |
257 | { | |
b5c80475 | 258 | ath9k_hw_rxena(sc->sc_ah); |
7a897203 SM |
259 | ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP); |
260 | ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP); | |
b5c80475 | 261 | ath_opmode_init(sc); |
fbbcd146 | 262 | ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel); |
b5c80475 FF |
263 | } |
264 | ||
/* Stop EDMA reception: reclaim all buffers from both hardware FIFOs. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
270 | ||
/*
 * ath_rx_init - allocate RX descriptors and DMA-mapped buffers.
 * @sc:    driver state
 * @nbufs: number of RX buffers to allocate
 *
 * EDMA-capable chips delegate to ath_rx_edma_init(); legacy chips get a
 * descriptor ring (ath_descdma_setup) plus one mapped skb per entry.
 * Returns 0 on success or a negative errno; on failure any partial
 * allocation is released via ath_rx_cleanup().
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	/* Half a maximum-size MPDU plus the hardware status area. */
	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			/* Undo this entry so cleanup won't touch it again */
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
330 | ||
/*
 * Release all RX buffers allocated by ath_rx_init(): unmap and free
 * each skb, clearing the per-buffer handles. EDMA chips take their own
 * cleanup path. Safe to call on a partially initialized RX side.
 */
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			/* Clear handles to make repeated cleanup harmless */
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
355 | ||
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 rfilt;

	/* TX99 test mode receives nothing. */
	if (IS_ENABLED(CONFIG_ATH9K_TX99))
		return 0;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	/* cur_chan fields below are protected by the channel-context lock. */
	spin_lock_bh(&sc->chan_lock);

	if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
	    sc->sc_ah->dynack.enabled)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	/*
	 * Single-vif station not in promiscuous-beacon mode only needs
	 * beacons from its own BSS; everyone else takes all beacons.
	 */
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->cur_chan->nvifs <= 1) &&
	    !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->cur_chan->rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
	    AR_SREV_9561(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_CONTROL_WRAPPER;

	/* While scanning with channel contexts, accept foreign beacons too. */
	if (ath9k_is_chanctx_enabled() &&
	    test_bit(ATH_OP_SCANNING, &common->op_flags))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	spin_unlock_bh(&sc->chan_lock);

	return rfilt;

}
439 | ||
/*
 * (Re)start reception: rebuild the legacy RX descriptor chain from the
 * free-buffer list, hand the first descriptor to the hardware and start
 * the PCU receiver. EDMA chips branch to their own start path.
 */
void ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rxbuf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf, false);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
}
471 | ||
4b883f02 FF |
472 | static void ath_flushrecv(struct ath_softc *sc) |
473 | { | |
474 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) | |
475 | ath_rx_tasklet(sc, 1, true); | |
476 | ath_rx_tasklet(sc, 1, false); | |
477 | } | |
478 | ||
/*
 * Stop reception: abort the PCU, close the RX filter, halt RX DMA and
 * flush any frames already received.
 *
 * Returns true only when RX stopped cleanly and no chip reset is
 * required (stopped && !reset).
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	/* Order matters: abort PCU first, then filter, then stop DMA. */
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	/* Don't warn about DMA stop failures on an unplugged device. */
	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
			"Failed to stop Rx DMA\n");
		RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
	}
	return stopped && !reset;
}
503 | ||
/*
 * Parse the beacon's information elements for the TIM and report
 * whether this DTIM beacon announces buffered broadcast/multicast
 * (CAB) traffic, i.e. whether we must stay awake after it.
 */
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	/* Walk the (id, length, data) IE triples, bounds-checked. */
	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			/* Only a DTIM beacon (count == 0) can announce CAB */
			if (tim->dtim_count != 0)
				break;
			/* Bit 0 of bitmap_ctrl is the multicast indicator */
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
535 | ||
/*
 * Handle one of our own beacons while in powersave: resynchronize
 * beacon timers after a TSF sync and decide whether to stay awake for
 * announced CAB (buffered broadcast/multicast) traffic.
 */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	bool skip_beacon = false;

	/* Too short for header (24) + TSF (8) + interval (2) + caps (2). */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
		/* Don't reprogram beacon timers on the offchannel context. */
		if (ath9k_is_chanctx_enabled()) {
			if (sc->cur_chan == &sc->offchannel.chan)
				skip_beacon = true;
		}
#endif

		if (!skip_beacon &&
		    !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
			ath9k_set_beacon(sc);

		ath9k_p2p_beacon_sync(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
589 | ||
/*
 * Powersave bookkeeping for a received frame: process our beacons,
 * and clear the relevant PS_WAIT_* flags once the awaited CAB or
 * PS-Poll traffic has arrived so the chip may go back to sleep.
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
625 | ||
/*
 * Peek at the head of the EDMA RX FIFO for @qtype and try to claim a
 * completed buffer.
 *
 * Returns false when the FIFO is empty or the head descriptor is still
 * in progress; returns true otherwise, with *dest set to the completed
 * buffer — or left as-is (NULL from the caller) when a corrupt
 * descriptor forced this buffer (and its follower) to be requeued.
 */
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_rxbuf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	/* Give the CPU a coherent view before reading the status words. */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		/* Nothing usable to hand back this round. */
		bf = NULL;
	}

	*dest = bf;
	return true;
}
f078f209 | 678 | |
1a04d59d | 679 | static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc, |
b5c80475 FF |
680 | struct ath_rx_status *rs, |
681 | enum ath9k_rx_qtype qtype) | |
682 | { | |
1a04d59d | 683 | struct ath_rxbuf *bf = NULL; |
b5c80475 | 684 | |
3a2923e8 FF |
685 | while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { |
686 | if (!bf) | |
687 | continue; | |
b5c80475 | 688 | |
3a2923e8 FF |
689 | return bf; |
690 | } | |
691 | return NULL; | |
b5c80475 FF |
692 | } |
693 | ||
/*
 * Claim the next completed buffer from the legacy RX descriptor chain.
 *
 * Returns the buffer unlinked from sc->rx.rxbuf with its DMA range
 * synced for CPU access, or NULL when nothing is ready. Includes a
 * workaround for hardware that corrupts descriptor status words (see
 * the look-ahead logic below).
 */
static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
					     struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_rxbuf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	/* The held-back buffer is never handed up (see ath_rx_buf_relink). */
	if (bf == sc->rx.buf_hold)
		return NULL;

	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;

		/*
		 * Re-check previous descriptor, in case it has been filled
		 * in the mean time.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
785 | ||
e0dd1a96 SM |
786 | static void ath9k_process_tsf(struct ath_rx_status *rs, |
787 | struct ieee80211_rx_status *rxs, | |
788 | u64 tsf) | |
789 | { | |
790 | u32 tsf_lower = tsf & 0xffffffff; | |
791 | ||
792 | rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp; | |
793 | if (rs->rs_tstamp > tsf_lower && | |
794 | unlikely(rs->rs_tstamp - tsf_lower > 0x10000000)) | |
795 | rxs->mactime -= 0x100000000ULL; | |
796 | ||
797 | if (rs->rs_tstamp < tsf_lower && | |
798 | unlikely(tsf_lower - rs->rs_tstamp > 0x10000000)) | |
799 | rxs->mactime += 0x100000000ULL; | |
800 | } | |
801 | ||
d435700f S |
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 *
 * Validates one received frame and fills in its mac80211 RX status.
 * Returns 0 when the frame should be delivered to mac80211, -EINVAL when
 * it must be dropped. On a corrupt descriptor, sc->rx.discard_next is set
 * so that a chained (rs_more) follow-up buffer is discarded as well.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	/* Sampled before clearing so a previously flagged chain is dropped. */
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	/* The 802.11 header starts after the hardware RX status area. */
	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped. PHY errors still feed the DFS radar
	 * detector and the spectral-scan FFT consumer first.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		return -EINVAL;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 * chan_lock guards cur_chan->rxfilter against concurrent channel
	 * context changes.
	 */
	spin_lock_bh(&sc->chan_lock);
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
				 sc->cur_chan->rxfilter)) {
		spin_unlock_bh(&sc->chan_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&sc->chan_lock);

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(rx_rate_err);
		return -EINVAL;
	}

	/* Notify the channel-context machinery when our own beacon arrives. */
	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc,
					ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	/* Count data frames (excluding QoS-null) for BT coexistence stats. */
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	/* Propagate the discard to a chained follow-up descriptor, if any. */
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}
936 | ||
c3124df7 SM |
937 | /* |
938 | * Run the LNA combining algorithm only in these cases: | |
939 | * | |
940 | * Standalone WLAN cards with both LNA/Antenna diversity | |
941 | * enabled in the EEPROM. | |
942 | * | |
943 | * WLAN+BT cards which are in the supported card list | |
944 | * in ath_pci_id_table and the user has loaded the | |
945 | * driver with "bt_ant_diversity" set to true. | |
946 | */ | |
947 | static void ath9k_antenna_check(struct ath_softc *sc, | |
948 | struct ath_rx_status *rs) | |
949 | { | |
950 | struct ath_hw *ah = sc->sc_ah; | |
951 | struct ath9k_hw_capabilities *pCap = &ah->caps; | |
952 | struct ath_common *common = ath9k_hw_common(ah); | |
953 | ||
954 | if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) | |
955 | return; | |
956 | ||
c3124df7 SM |
957 | /* |
958 | * Change the default rx antenna if rx diversity | |
959 | * chooses the other antenna 3 times in a row. | |
960 | */ | |
961 | if (sc->rx.defant != rs->rs_antenna) { | |
962 | if (++sc->rx.rxotherant >= 3) | |
963 | ath_setdefantenna(sc, rs->rs_antenna); | |
964 | } else { | |
965 | sc->rx.rxotherant = 0; | |
966 | } | |
967 | ||
968 | if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { | |
969 | if (common->bt_ant_diversity) | |
970 | ath_ant_comb_scan(sc, rs); | |
971 | } else { | |
972 | ath_ant_comb_scan(sc, rs); | |
973 | } | |
974 | } | |
975 | ||
21fbbca3 CL |
976 | static void ath9k_apply_ampdu_details(struct ath_softc *sc, |
977 | struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) | |
978 | { | |
979 | if (rs->rs_isaggr) { | |
980 | rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; | |
981 | ||
982 | rxs->ampdu_reference = sc->rx.ampdu_ref; | |
983 | ||
984 | if (!rs->rs_moreaggr) { | |
985 | rxs->flag |= RX_FLAG_AMPDU_IS_LAST; | |
986 | sc->rx.ampdu_ref++; | |
987 | } | |
988 | ||
989 | if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE) | |
990 | rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR; | |
991 | } | |
992 | } | |
993 | ||
b5c80475 FF |
/*
 * Main RX processing loop: drain completed RX buffers, hand good frames
 * to mac80211, and recycle each buffer back to the hardware.
 *
 * @flush: non-zero when draining the queue without re-arming RX
 *         (buffers are requeued but the hardware is not restarted).
 * @hp:    for EDMA chips, selects the high-priority RX queue; ignored
 *         for legacy (non-EDMA) chips.
 *
 * Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;
	/* Bound the number of frames processed per invocation. */
	unsigned int budget = 512;
	struct ieee80211_hdr *hdr;

	/* EDMA RX buffers are also written by the host, hence bidirectional. */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	/* Sample the TSF once; used to extend each frame's 32-bit timestamp. */
	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		/* Swap the freshly mapped replacement buffer into the bf. */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		/* Expose the payload; strip the leading HW status area, if any. */
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		/* Last fragment: merge this buffer's data into the first one. */
		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/* Drop the 8-byte Michael MIC trailer if it was stripped. */
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		/* Feed the power-save state machine while waiting for frames. */
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);

		/* Received ACKs feed the dynamic ACK-timeout estimator. */
		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_ack(hdr->frame_control))
			ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		/* Abandon any partially assembled fragment chain. */
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);

		/* Give the buffer back to the hardware unless we are flushing. */
		if (!edma) {
			ath_rx_buf_relink(sc, bf, flush);
			if (!flush)
				ath9k_hw_rxena(ah);
		} else if (!flush) {
			ath_rx_edma_buf_link(sc, qtype);
		}

		if (!budget--)
			break;
	} while (1);

	/* Re-enable RXEOL/RXORN interrupts if they were masked off. */
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}