/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"
f078f209 | 20 | |
/* Stash/retrieve the owning ath_rxbuf in an skb's control buffer (EDMA RX). */
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_rxbuf **)__skb->cb))
b5c80475 | 22 | |
ededf1f8 VT |
23 | static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) |
24 | { | |
25 | return sc->ps_enabled && | |
26 | (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); | |
27 | } | |
28 | ||
f078f209 LR |
29 | /* |
30 | * Setup and link descriptors. | |
31 | * | |
32 | * 11N: we can no longer afford to self link the last descriptor. | |
33 | * MAC acknowledges BA status as long as it copies frames to host | |
34 | * buffer (or rx fifo). This can incorrectly acknowledge packets | |
35 | * to a sender if last desc is self-linked. | |
f078f209 | 36 | */ |
1a04d59d | 37 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf) |
f078f209 | 38 | { |
cbe61d8a | 39 | struct ath_hw *ah = sc->sc_ah; |
cc861f74 | 40 | struct ath_common *common = ath9k_hw_common(ah); |
f078f209 LR |
41 | struct ath_desc *ds; |
42 | struct sk_buff *skb; | |
43 | ||
f078f209 | 44 | ds = bf->bf_desc; |
be0418ad | 45 | ds->ds_link = 0; /* link to null */ |
f078f209 LR |
46 | ds->ds_data = bf->bf_buf_addr; |
47 | ||
be0418ad | 48 | /* virtual addr of the beginning of the buffer. */ |
f078f209 | 49 | skb = bf->bf_mpdu; |
9680e8a3 | 50 | BUG_ON(skb == NULL); |
f078f209 LR |
51 | ds->ds_vdata = skb->data; |
52 | ||
cc861f74 LR |
53 | /* |
54 | * setup rx descriptors. The rx_bufsize here tells the hardware | |
b4b6cda2 | 55 | * how much data it can DMA to us and that we are prepared |
cc861f74 LR |
56 | * to process |
57 | */ | |
b77f483f | 58 | ath9k_hw_setuprxdesc(ah, ds, |
cc861f74 | 59 | common->rx_bufsize, |
f078f209 LR |
60 | 0); |
61 | ||
b77f483f | 62 | if (sc->rx.rxlink == NULL) |
f078f209 LR |
63 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
64 | else | |
b77f483f | 65 | *sc->rx.rxlink = bf->bf_daddr; |
f078f209 | 66 | |
b77f483f | 67 | sc->rx.rxlink = &ds->ds_link; |
f078f209 LR |
68 | } |
69 | ||
1a04d59d | 70 | static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf) |
e96542e5 FF |
71 | { |
72 | if (sc->rx.buf_hold) | |
73 | ath_rx_buf_link(sc, sc->rx.buf_hold); | |
74 | ||
75 | sc->rx.buf_hold = bf; | |
76 | } | |
77 | ||
ff37e337 S |
78 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
79 | { | |
80 | /* XXX block beacon interrupts */ | |
81 | ath9k_hw_setantenna(sc->sc_ah, antenna); | |
b77f483f S |
82 | sc->rx.defant = antenna; |
83 | sc->rx.rxotherant = 0; | |
ff37e337 S |
84 | } |
85 | ||
f078f209 LR |
86 | static void ath_opmode_init(struct ath_softc *sc) |
87 | { | |
cbe61d8a | 88 | struct ath_hw *ah = sc->sc_ah; |
1510718d LR |
89 | struct ath_common *common = ath9k_hw_common(ah); |
90 | ||
f078f209 LR |
91 | u32 rfilt, mfilt[2]; |
92 | ||
93 | /* configure rx filter */ | |
94 | rfilt = ath_calcrxfilter(sc); | |
95 | ath9k_hw_setrxfilter(ah, rfilt); | |
96 | ||
97 | /* configure bssid mask */ | |
364734fa | 98 | ath_hw_setbssidmask(common); |
f078f209 LR |
99 | |
100 | /* configure operational mode */ | |
101 | ath9k_hw_setopmode(ah); | |
102 | ||
f078f209 LR |
103 | /* calculate and install multicast filter */ |
104 | mfilt[0] = mfilt[1] = ~0; | |
f078f209 | 105 | ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); |
f078f209 LR |
106 | } |
107 | ||
b5c80475 FF |
108 | static bool ath_rx_edma_buf_link(struct ath_softc *sc, |
109 | enum ath9k_rx_qtype qtype) | |
f078f209 | 110 | { |
b5c80475 FF |
111 | struct ath_hw *ah = sc->sc_ah; |
112 | struct ath_rx_edma *rx_edma; | |
f078f209 | 113 | struct sk_buff *skb; |
1a04d59d | 114 | struct ath_rxbuf *bf; |
f078f209 | 115 | |
b5c80475 FF |
116 | rx_edma = &sc->rx.rx_edma[qtype]; |
117 | if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) | |
118 | return false; | |
f078f209 | 119 | |
1a04d59d | 120 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); |
b5c80475 | 121 | list_del_init(&bf->list); |
f078f209 | 122 | |
b5c80475 FF |
123 | skb = bf->bf_mpdu; |
124 | ||
b5c80475 FF |
125 | memset(skb->data, 0, ah->caps.rx_status_len); |
126 | dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, | |
127 | ah->caps.rx_status_len, DMA_TO_DEVICE); | |
f078f209 | 128 | |
b5c80475 FF |
129 | SKB_CB_ATHBUF(skb) = bf; |
130 | ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); | |
07236bf3 | 131 | __skb_queue_tail(&rx_edma->rx_fifo, skb); |
f078f209 | 132 | |
b5c80475 FF |
133 | return true; |
134 | } | |
135 | ||
136 | static void ath_rx_addbuffer_edma(struct ath_softc *sc, | |
7a897203 | 137 | enum ath9k_rx_qtype qtype) |
b5c80475 | 138 | { |
b5c80475 | 139 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
1a04d59d | 140 | struct ath_rxbuf *bf, *tbf; |
b5c80475 | 141 | |
b5c80475 | 142 | if (list_empty(&sc->rx.rxbuf)) { |
d2182b69 | 143 | ath_dbg(common, QUEUE, "No free rx buf available\n"); |
b5c80475 | 144 | return; |
797fe5cb | 145 | } |
f078f209 | 146 | |
6a01f0c0 | 147 | list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) |
b5c80475 FF |
148 | if (!ath_rx_edma_buf_link(sc, qtype)) |
149 | break; | |
150 | ||
b5c80475 FF |
151 | } |
152 | ||
153 | static void ath_rx_remove_buffer(struct ath_softc *sc, | |
154 | enum ath9k_rx_qtype qtype) | |
155 | { | |
1a04d59d | 156 | struct ath_rxbuf *bf; |
b5c80475 FF |
157 | struct ath_rx_edma *rx_edma; |
158 | struct sk_buff *skb; | |
159 | ||
160 | rx_edma = &sc->rx.rx_edma[qtype]; | |
161 | ||
07236bf3 | 162 | while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) { |
b5c80475 FF |
163 | bf = SKB_CB_ATHBUF(skb); |
164 | BUG_ON(!bf); | |
165 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
166 | } | |
167 | } | |
168 | ||
169 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | |
170 | { | |
ba542385 MSS |
171 | struct ath_hw *ah = sc->sc_ah; |
172 | struct ath_common *common = ath9k_hw_common(ah); | |
1a04d59d | 173 | struct ath_rxbuf *bf; |
b5c80475 FF |
174 | |
175 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | |
176 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | |
177 | ||
797fe5cb | 178 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
ba542385 MSS |
179 | if (bf->bf_mpdu) { |
180 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | |
181 | common->rx_bufsize, | |
182 | DMA_BIDIRECTIONAL); | |
b5c80475 | 183 | dev_kfree_skb_any(bf->bf_mpdu); |
ba542385 MSS |
184 | bf->bf_buf_addr = 0; |
185 | bf->bf_mpdu = NULL; | |
186 | } | |
b5c80475 | 187 | } |
b5c80475 FF |
188 | } |
189 | ||
190 | static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) | |
191 | { | |
5d07cca2 | 192 | __skb_queue_head_init(&rx_edma->rx_fifo); |
b5c80475 FF |
193 | rx_edma->rx_fifo_hwsize = size; |
194 | } | |
195 | ||
196 | static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) | |
197 | { | |
198 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | |
199 | struct ath_hw *ah = sc->sc_ah; | |
200 | struct sk_buff *skb; | |
1a04d59d | 201 | struct ath_rxbuf *bf; |
b5c80475 FF |
202 | int error = 0, i; |
203 | u32 size; | |
204 | ||
b5c80475 FF |
205 | ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - |
206 | ah->caps.rx_status_len); | |
207 | ||
208 | ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], | |
209 | ah->caps.rx_lp_qdepth); | |
210 | ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], | |
211 | ah->caps.rx_hp_qdepth); | |
212 | ||
1a04d59d | 213 | size = sizeof(struct ath_rxbuf) * nbufs; |
b81950b1 | 214 | bf = devm_kzalloc(sc->dev, size, GFP_KERNEL); |
b5c80475 FF |
215 | if (!bf) |
216 | return -ENOMEM; | |
217 | ||
218 | INIT_LIST_HEAD(&sc->rx.rxbuf); | |
b5c80475 FF |
219 | |
220 | for (i = 0; i < nbufs; i++, bf++) { | |
cc861f74 | 221 | skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); |
b5c80475 | 222 | if (!skb) { |
797fe5cb | 223 | error = -ENOMEM; |
b5c80475 | 224 | goto rx_init_fail; |
f078f209 | 225 | } |
f078f209 | 226 | |
b5c80475 | 227 | memset(skb->data, 0, common->rx_bufsize); |
797fe5cb | 228 | bf->bf_mpdu = skb; |
b5c80475 | 229 | |
797fe5cb | 230 | bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, |
cc861f74 | 231 | common->rx_bufsize, |
b5c80475 | 232 | DMA_BIDIRECTIONAL); |
797fe5cb | 233 | if (unlikely(dma_mapping_error(sc->dev, |
b5c80475 FF |
234 | bf->bf_buf_addr))) { |
235 | dev_kfree_skb_any(skb); | |
236 | bf->bf_mpdu = NULL; | |
6cf9e995 | 237 | bf->bf_buf_addr = 0; |
3800276a | 238 | ath_err(common, |
b5c80475 FF |
239 | "dma_mapping_error() on RX init\n"); |
240 | error = -ENOMEM; | |
241 | goto rx_init_fail; | |
242 | } | |
243 | ||
244 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
245 | } | |
246 | ||
247 | return 0; | |
248 | ||
249 | rx_init_fail: | |
250 | ath_rx_edma_cleanup(sc); | |
251 | return error; | |
252 | } | |
253 | ||
254 | static void ath_edma_start_recv(struct ath_softc *sc) | |
255 | { | |
b5c80475 | 256 | ath9k_hw_rxena(sc->sc_ah); |
7a897203 SM |
257 | ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP); |
258 | ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP); | |
b5c80475 | 259 | ath_opmode_init(sc); |
4cb54fa3 | 260 | ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); |
b5c80475 FF |
261 | } |
262 | ||
263 | static void ath_edma_stop_recv(struct ath_softc *sc) | |
264 | { | |
b5c80475 FF |
265 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); |
266 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | |
b5c80475 FF |
267 | } |
268 | ||
269 | int ath_rx_init(struct ath_softc *sc, int nbufs) | |
270 | { | |
271 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | |
272 | struct sk_buff *skb; | |
1a04d59d | 273 | struct ath_rxbuf *bf; |
b5c80475 FF |
274 | int error = 0; |
275 | ||
4bdd1e97 | 276 | spin_lock_init(&sc->sc_pcu_lock); |
b5c80475 | 277 | |
0d95521e FF |
278 | common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + |
279 | sc->sc_ah->caps.rx_status_len; | |
280 | ||
e87f3d53 | 281 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) |
b5c80475 | 282 | return ath_rx_edma_init(sc, nbufs); |
b5c80475 | 283 | |
e87f3d53 SM |
284 | ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n", |
285 | common->cachelsz, common->rx_bufsize); | |
b5c80475 | 286 | |
e87f3d53 SM |
287 | /* Initialize rx descriptors */ |
288 | ||
289 | error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, | |
290 | "rx", nbufs, 1, 0); | |
291 | if (error != 0) { | |
292 | ath_err(common, | |
293 | "failed to allocate rx descriptors: %d\n", | |
294 | error); | |
295 | goto err; | |
296 | } | |
297 | ||
298 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | |
299 | skb = ath_rxbuf_alloc(common, common->rx_bufsize, | |
300 | GFP_KERNEL); | |
301 | if (skb == NULL) { | |
302 | error = -ENOMEM; | |
797fe5cb S |
303 | goto err; |
304 | } | |
b5c80475 | 305 | |
e87f3d53 SM |
306 | bf->bf_mpdu = skb; |
307 | bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, | |
308 | common->rx_bufsize, | |
309 | DMA_FROM_DEVICE); | |
310 | if (unlikely(dma_mapping_error(sc->dev, | |
311 | bf->bf_buf_addr))) { | |
312 | dev_kfree_skb_any(skb); | |
313 | bf->bf_mpdu = NULL; | |
314 | bf->bf_buf_addr = 0; | |
315 | ath_err(common, | |
316 | "dma_mapping_error() on RX init\n"); | |
317 | error = -ENOMEM; | |
318 | goto err; | |
b5c80475 | 319 | } |
797fe5cb | 320 | } |
e87f3d53 | 321 | sc->rx.rxlink = NULL; |
797fe5cb | 322 | err: |
f078f209 LR |
323 | if (error) |
324 | ath_rx_cleanup(sc); | |
325 | ||
326 | return error; | |
327 | } | |
328 | ||
f078f209 LR |
329 | void ath_rx_cleanup(struct ath_softc *sc) |
330 | { | |
cc861f74 LR |
331 | struct ath_hw *ah = sc->sc_ah; |
332 | struct ath_common *common = ath9k_hw_common(ah); | |
f078f209 | 333 | struct sk_buff *skb; |
1a04d59d | 334 | struct ath_rxbuf *bf; |
f078f209 | 335 | |
b5c80475 FF |
336 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { |
337 | ath_rx_edma_cleanup(sc); | |
338 | return; | |
e87f3d53 SM |
339 | } |
340 | ||
341 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | |
342 | skb = bf->bf_mpdu; | |
343 | if (skb) { | |
344 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | |
345 | common->rx_bufsize, | |
346 | DMA_FROM_DEVICE); | |
347 | dev_kfree_skb(skb); | |
348 | bf->bf_buf_addr = 0; | |
349 | bf->bf_mpdu = NULL; | |
051b9191 | 350 | } |
b5c80475 | 351 | } |
f078f209 LR |
352 | } |
353 | ||
354 | /* | |
355 | * Calculate the receive filter according to the | |
356 | * operating mode and state: | |
357 | * | |
358 | * o always accept unicast, broadcast, and multicast traffic | |
359 | * o maintain current state of phy error reception (the hal | |
360 | * may enable phy error frames for noise immunity work) | |
361 | * o probe request frames are accepted only when operating in | |
362 | * hostap, adhoc, or monitor modes | |
363 | * o enable promiscuous mode according to the interface state | |
364 | * o accept beacons: | |
365 | * - when operating in adhoc mode so the 802.11 layer creates | |
366 | * node table entries for peers, | |
367 | * - when operating in station mode for collecting rssi data when | |
368 | * the station is otherwise quiet, or | |
369 | * - when operating as a repeater so we see repeater-sta beacons | |
370 | * - when scanning | |
371 | */ | |
372 | ||
373 | u32 ath_calcrxfilter(struct ath_softc *sc) | |
374 | { | |
f078f209 LR |
375 | u32 rfilt; |
376 | ||
89f927af LR |
377 | if (config_enabled(CONFIG_ATH9K_TX99)) |
378 | return 0; | |
379 | ||
ac06697c | 380 | rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
f078f209 LR |
381 | | ATH9K_RX_FILTER_MCAST; |
382 | ||
73e4937d ZK |
383 | /* if operating on a DFS channel, enable radar pulse detection */ |
384 | if (sc->hw->conf.radar_enabled) | |
385 | rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR; | |
386 | ||
9c1d8e4a | 387 | if (sc->rx.rxfilter & FIF_PROBE_REQ) |
f078f209 LR |
388 | rfilt |= ATH9K_RX_FILTER_PROBEREQ; |
389 | ||
217ba9da JM |
390 | /* |
391 | * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station | |
392 | * mode interface or when in monitor mode. AP mode does not need this | |
393 | * since it receives all in-BSS frames anyway. | |
394 | */ | |
2e286947 | 395 | if (sc->sc_ah->is_monitoring) |
f078f209 | 396 | rfilt |= ATH9K_RX_FILTER_PROM; |
f078f209 | 397 | |
d42c6b71 S |
398 | if (sc->rx.rxfilter & FIF_CONTROL) |
399 | rfilt |= ATH9K_RX_FILTER_CONTROL; | |
400 | ||
dbaaa147 | 401 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && |
cfda6695 | 402 | (sc->nvifs <= 1) && |
dbaaa147 VT |
403 | !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) |
404 | rfilt |= ATH9K_RX_FILTER_MYBEACON; | |
405 | else | |
f078f209 LR |
406 | rfilt |= ATH9K_RX_FILTER_BEACON; |
407 | ||
264bbec8 | 408 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || |
66afad01 | 409 | (sc->rx.rxfilter & FIF_PSPOLL)) |
dbaaa147 | 410 | rfilt |= ATH9K_RX_FILTER_PSPOLL; |
be0418ad | 411 | |
7ea310be S |
412 | if (conf_is_ht(&sc->hw->conf)) |
413 | rfilt |= ATH9K_RX_FILTER_COMP_BAR; | |
414 | ||
7545daf4 | 415 | if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { |
a549459c TW |
416 | /* This is needed for older chips */ |
417 | if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160) | |
5eb6ba83 | 418 | rfilt |= ATH9K_RX_FILTER_PROM; |
b93bce2a JM |
419 | rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; |
420 | } | |
421 | ||
2c323058 | 422 | if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah)) |
b3d7aa43 GJ |
423 | rfilt |= ATH9K_RX_FILTER_4ADDRESS; |
424 | ||
f078f209 | 425 | return rfilt; |
7dcfdcd9 | 426 | |
f078f209 LR |
427 | } |
428 | ||
f078f209 LR |
429 | int ath_startrecv(struct ath_softc *sc) |
430 | { | |
cbe61d8a | 431 | struct ath_hw *ah = sc->sc_ah; |
1a04d59d | 432 | struct ath_rxbuf *bf, *tbf; |
f078f209 | 433 | |
b5c80475 FF |
434 | if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { |
435 | ath_edma_start_recv(sc); | |
436 | return 0; | |
437 | } | |
438 | ||
b77f483f | 439 | if (list_empty(&sc->rx.rxbuf)) |
f078f209 LR |
440 | goto start_recv; |
441 | ||
e96542e5 | 442 | sc->rx.buf_hold = NULL; |
b77f483f S |
443 | sc->rx.rxlink = NULL; |
444 | list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { | |
f078f209 LR |
445 | ath_rx_buf_link(sc, bf); |
446 | } | |
447 | ||
448 | /* We could have deleted elements so the list may be empty now */ | |
b77f483f | 449 | if (list_empty(&sc->rx.rxbuf)) |
f078f209 LR |
450 | goto start_recv; |
451 | ||
1a04d59d | 452 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); |
f078f209 | 453 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
be0418ad | 454 | ath9k_hw_rxena(ah); |
f078f209 LR |
455 | |
456 | start_recv: | |
be0418ad | 457 | ath_opmode_init(sc); |
4cb54fa3 | 458 | ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); |
be0418ad | 459 | |
f078f209 LR |
460 | return 0; |
461 | } | |
462 | ||
4b883f02 FF |
463 | static void ath_flushrecv(struct ath_softc *sc) |
464 | { | |
465 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) | |
466 | ath_rx_tasklet(sc, 1, true); | |
467 | ath_rx_tasklet(sc, 1, false); | |
468 | } | |
469 | ||
f078f209 LR |
470 | bool ath_stoprecv(struct ath_softc *sc) |
471 | { | |
cbe61d8a | 472 | struct ath_hw *ah = sc->sc_ah; |
5882da02 | 473 | bool stopped, reset = false; |
f078f209 | 474 | |
d47844a0 | 475 | ath9k_hw_abortpcurecv(ah); |
be0418ad | 476 | ath9k_hw_setrxfilter(ah, 0); |
5882da02 | 477 | stopped = ath9k_hw_stopdmarecv(ah, &reset); |
b5c80475 | 478 | |
4b883f02 FF |
479 | ath_flushrecv(sc); |
480 | ||
b5c80475 FF |
481 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) |
482 | ath_edma_stop_recv(sc); | |
483 | else | |
484 | sc->rx.rxlink = NULL; | |
be0418ad | 485 | |
d584747b RM |
486 | if (!(ah->ah_flags & AH_UNPLUGGED) && |
487 | unlikely(!stopped)) { | |
d7fd1b50 BG |
488 | ath_err(ath9k_hw_common(sc->sc_ah), |
489 | "Could not stop RX, we could be " | |
490 | "confusing the DMA engine when we start RX up\n"); | |
491 | ATH_DBG_WARN_ON_ONCE(!stopped); | |
492 | } | |
2232d31b | 493 | return stopped && !reset; |
f078f209 LR |
494 | } |
495 | ||
cc65965c JM |
496 | static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) |
497 | { | |
498 | /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ | |
499 | struct ieee80211_mgmt *mgmt; | |
500 | u8 *pos, *end, id, elen; | |
501 | struct ieee80211_tim_ie *tim; | |
502 | ||
503 | mgmt = (struct ieee80211_mgmt *)skb->data; | |
504 | pos = mgmt->u.beacon.variable; | |
505 | end = skb->data + skb->len; | |
506 | ||
507 | while (pos + 2 < end) { | |
508 | id = *pos++; | |
509 | elen = *pos++; | |
510 | if (pos + elen > end) | |
511 | break; | |
512 | ||
513 | if (id == WLAN_EID_TIM) { | |
514 | if (elen < sizeof(*tim)) | |
515 | break; | |
516 | tim = (struct ieee80211_tim_ie *) pos; | |
517 | if (tim->dtim_count != 0) | |
518 | break; | |
519 | return tim->bitmap_ctrl & 0x01; | |
520 | } | |
521 | ||
522 | pos += elen; | |
523 | } | |
524 | ||
525 | return false; | |
526 | } | |
527 | ||
cc65965c JM |
528 | static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) |
529 | { | |
1510718d | 530 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
cc65965c JM |
531 | |
532 | if (skb->len < 24 + 8 + 2 + 2) | |
533 | return; | |
534 | ||
1b04b930 | 535 | sc->ps_flags &= ~PS_WAIT_FOR_BEACON; |
293dc5df | 536 | |
1b04b930 S |
537 | if (sc->ps_flags & PS_BEACON_SYNC) { |
538 | sc->ps_flags &= ~PS_BEACON_SYNC; | |
d2182b69 | 539 | ath_dbg(common, PS, |
1a6404a1 | 540 | "Reconfigure beacon timers based on synchronized timestamp\n"); |
ef4ad633 | 541 | ath9k_set_beacon(sc); |
ccdfeab6 JM |
542 | } |
543 | ||
cc65965c JM |
544 | if (ath_beacon_dtim_pending_cab(skb)) { |
545 | /* | |
546 | * Remain awake waiting for buffered broadcast/multicast | |
58f5fffd GJ |
547 | * frames. If the last broadcast/multicast frame is not |
548 | * received properly, the next beacon frame will work as | |
549 | * a backup trigger for returning into NETWORK SLEEP state, | |
550 | * so we are waiting for it as well. | |
cc65965c | 551 | */ |
d2182b69 | 552 | ath_dbg(common, PS, |
226afe68 | 553 | "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); |
1b04b930 | 554 | sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; |
cc65965c JM |
555 | return; |
556 | } | |
557 | ||
1b04b930 | 558 | if (sc->ps_flags & PS_WAIT_FOR_CAB) { |
cc65965c JM |
559 | /* |
560 | * This can happen if a broadcast frame is dropped or the AP | |
561 | * fails to send a frame indicating that all CAB frames have | |
562 | * been delivered. | |
563 | */ | |
1b04b930 | 564 | sc->ps_flags &= ~PS_WAIT_FOR_CAB; |
d2182b69 | 565 | ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); |
cc65965c | 566 | } |
cc65965c JM |
567 | } |
568 | ||
f73c604c | 569 | static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) |
cc65965c JM |
570 | { |
571 | struct ieee80211_hdr *hdr; | |
c46917bb | 572 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
cc65965c JM |
573 | |
574 | hdr = (struct ieee80211_hdr *)skb->data; | |
575 | ||
576 | /* Process Beacon and CAB receive in PS state */ | |
ededf1f8 | 577 | if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) |
07c15a3f | 578 | && mybeacon) { |
cc65965c | 579 | ath_rx_ps_beacon(sc, skb); |
07c15a3f SM |
580 | } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && |
581 | (ieee80211_is_data(hdr->frame_control) || | |
582 | ieee80211_is_action(hdr->frame_control)) && | |
583 | is_multicast_ether_addr(hdr->addr1) && | |
584 | !ieee80211_has_moredata(hdr->frame_control)) { | |
cc65965c JM |
585 | /* |
586 | * No more broadcast/multicast frames to be received at this | |
587 | * point. | |
588 | */ | |
3fac6dfd | 589 | sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); |
d2182b69 | 590 | ath_dbg(common, PS, |
226afe68 | 591 | "All PS CAB frames received, back to sleep\n"); |
1b04b930 | 592 | } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && |
9a23f9ca JM |
593 | !is_multicast_ether_addr(hdr->addr1) && |
594 | !ieee80211_has_morefrags(hdr->frame_control)) { | |
1b04b930 | 595 | sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; |
d2182b69 | 596 | ath_dbg(common, PS, |
226afe68 | 597 | "Going back to sleep after having received PS-Poll data (0x%lx)\n", |
1b04b930 S |
598 | sc->ps_flags & (PS_WAIT_FOR_BEACON | |
599 | PS_WAIT_FOR_CAB | | |
600 | PS_WAIT_FOR_PSPOLL_DATA | | |
601 | PS_WAIT_FOR_TX_ACK)); | |
cc65965c JM |
602 | } |
603 | } | |
604 | ||
b5c80475 | 605 | static bool ath_edma_get_buffers(struct ath_softc *sc, |
3a2923e8 FF |
606 | enum ath9k_rx_qtype qtype, |
607 | struct ath_rx_status *rs, | |
1a04d59d | 608 | struct ath_rxbuf **dest) |
f078f209 | 609 | { |
b5c80475 FF |
610 | struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; |
611 | struct ath_hw *ah = sc->sc_ah; | |
612 | struct ath_common *common = ath9k_hw_common(ah); | |
613 | struct sk_buff *skb; | |
1a04d59d | 614 | struct ath_rxbuf *bf; |
b5c80475 FF |
615 | int ret; |
616 | ||
617 | skb = skb_peek(&rx_edma->rx_fifo); | |
618 | if (!skb) | |
619 | return false; | |
620 | ||
621 | bf = SKB_CB_ATHBUF(skb); | |
622 | BUG_ON(!bf); | |
623 | ||
ce9426d1 | 624 | dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, |
b5c80475 FF |
625 | common->rx_bufsize, DMA_FROM_DEVICE); |
626 | ||
3a2923e8 | 627 | ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data); |
ce9426d1 ML |
628 | if (ret == -EINPROGRESS) { |
629 | /*let device gain the buffer again*/ | |
630 | dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, | |
631 | common->rx_bufsize, DMA_FROM_DEVICE); | |
b5c80475 | 632 | return false; |
ce9426d1 | 633 | } |
b5c80475 FF |
634 | |
635 | __skb_unlink(skb, &rx_edma->rx_fifo); | |
636 | if (ret == -EINVAL) { | |
637 | /* corrupt descriptor, skip this one and the following one */ | |
638 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
639 | ath_rx_edma_buf_link(sc, qtype); | |
b5c80475 | 640 | |
3a2923e8 FF |
641 | skb = skb_peek(&rx_edma->rx_fifo); |
642 | if (skb) { | |
643 | bf = SKB_CB_ATHBUF(skb); | |
644 | BUG_ON(!bf); | |
645 | ||
646 | __skb_unlink(skb, &rx_edma->rx_fifo); | |
647 | list_add_tail(&bf->list, &sc->rx.rxbuf); | |
648 | ath_rx_edma_buf_link(sc, qtype); | |
3a2923e8 | 649 | } |
6bb51c70 TH |
650 | |
651 | bf = NULL; | |
b5c80475 | 652 | } |
b5c80475 | 653 | |
3a2923e8 | 654 | *dest = bf; |
b5c80475 FF |
655 | return true; |
656 | } | |
f078f209 | 657 | |
1a04d59d | 658 | static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc, |
b5c80475 FF |
659 | struct ath_rx_status *rs, |
660 | enum ath9k_rx_qtype qtype) | |
661 | { | |
1a04d59d | 662 | struct ath_rxbuf *bf = NULL; |
b5c80475 | 663 | |
3a2923e8 FF |
664 | while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { |
665 | if (!bf) | |
666 | continue; | |
b5c80475 | 667 | |
3a2923e8 FF |
668 | return bf; |
669 | } | |
670 | return NULL; | |
b5c80475 FF |
671 | } |
672 | ||
1a04d59d | 673 | static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, |
b5c80475 FF |
674 | struct ath_rx_status *rs) |
675 | { | |
676 | struct ath_hw *ah = sc->sc_ah; | |
677 | struct ath_common *common = ath9k_hw_common(ah); | |
f078f209 | 678 | struct ath_desc *ds; |
1a04d59d | 679 | struct ath_rxbuf *bf; |
b5c80475 FF |
680 | int ret; |
681 | ||
682 | if (list_empty(&sc->rx.rxbuf)) { | |
683 | sc->rx.rxlink = NULL; | |
684 | return NULL; | |
685 | } | |
686 | ||
1a04d59d | 687 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list); |
e96542e5 FF |
688 | if (bf == sc->rx.buf_hold) |
689 | return NULL; | |
690 | ||
b5c80475 FF |
691 | ds = bf->bf_desc; |
692 | ||
693 | /* | |
694 | * Must provide the virtual address of the current | |
695 | * descriptor, the physical address, and the virtual | |
696 | * address of the next descriptor in the h/w chain. | |
697 | * This allows the HAL to look ahead to see if the | |
698 | * hardware is done with a descriptor by checking the | |
699 | * done bit in the following descriptor and the address | |
700 | * of the current descriptor the DMA engine is working | |
701 | * on. All this is necessary because of our use of | |
702 | * a self-linked list to avoid rx overruns. | |
703 | */ | |
3de21116 | 704 | ret = ath9k_hw_rxprocdesc(ah, ds, rs); |
b5c80475 FF |
705 | if (ret == -EINPROGRESS) { |
706 | struct ath_rx_status trs; | |
1a04d59d | 707 | struct ath_rxbuf *tbf; |
b5c80475 FF |
708 | struct ath_desc *tds; |
709 | ||
710 | memset(&trs, 0, sizeof(trs)); | |
711 | if (list_is_last(&bf->list, &sc->rx.rxbuf)) { | |
712 | sc->rx.rxlink = NULL; | |
713 | return NULL; | |
714 | } | |
715 | ||
1a04d59d | 716 | tbf = list_entry(bf->list.next, struct ath_rxbuf, list); |
b5c80475 FF |
717 | |
718 | /* | |
719 | * On some hardware the descriptor status words could | |
720 | * get corrupted, including the done bit. Because of | |
721 | * this, check if the next descriptor's done bit is | |
722 | * set or not. | |
723 | * | |
724 | * If the next descriptor's done bit is set, the current | |
725 | * descriptor has been corrupted. Force s/w to discard | |
726 | * this descriptor and continue... | |
727 | */ | |
728 | ||
729 | tds = tbf->bf_desc; | |
3de21116 | 730 | ret = ath9k_hw_rxprocdesc(ah, tds, &trs); |
b5c80475 FF |
731 | if (ret == -EINPROGRESS) |
732 | return NULL; | |
723e7113 FF |
733 | |
734 | /* | |
b7b146c9 FF |
735 | * Re-check previous descriptor, in case it has been filled |
736 | * in the mean time. | |
723e7113 | 737 | */ |
b7b146c9 FF |
738 | ret = ath9k_hw_rxprocdesc(ah, ds, rs); |
739 | if (ret == -EINPROGRESS) { | |
740 | /* | |
741 | * mark descriptor as zero-length and set the 'more' | |
742 | * flag to ensure that both buffers get discarded | |
743 | */ | |
744 | rs->rs_datalen = 0; | |
745 | rs->rs_more = true; | |
746 | } | |
b5c80475 FF |
747 | } |
748 | ||
a3dc48e8 | 749 | list_del(&bf->list); |
b5c80475 FF |
750 | if (!bf->bf_mpdu) |
751 | return bf; | |
752 | ||
753 | /* | |
754 | * Synchronize the DMA transfer with CPU before | |
755 | * 1. accessing the frame | |
756 | * 2. requeueing the same buffer to h/w | |
757 | */ | |
ce9426d1 | 758 | dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, |
b5c80475 FF |
759 | common->rx_bufsize, |
760 | DMA_FROM_DEVICE); | |
761 | ||
762 | return bf; | |
763 | } | |
764 | ||
d435700f S |
765 | /* Assumes you've already done the endian to CPU conversion */ |
766 | static bool ath9k_rx_accept(struct ath_common *common, | |
9f167f64 | 767 | struct ieee80211_hdr *hdr, |
d435700f S |
768 | struct ieee80211_rx_status *rxs, |
769 | struct ath_rx_status *rx_stats, | |
770 | bool *decrypt_error) | |
771 | { | |
ec205999 | 772 | struct ath_softc *sc = (struct ath_softc *) common->priv; |
66760eac | 773 | bool is_mc, is_valid_tkip, strip_mic, mic_error; |
d435700f | 774 | struct ath_hw *ah = common->ah; |
d435700f S |
775 | __le16 fc; |
776 | ||
d435700f S |
777 | fc = hdr->frame_control; |
778 | ||
66760eac FF |
779 | is_mc = !!is_multicast_ether_addr(hdr->addr1); |
780 | is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && | |
781 | test_bit(rx_stats->rs_keyix, common->tkip_keymap); | |
152e585d | 782 | strip_mic = is_valid_tkip && ieee80211_is_data(fc) && |
2a5783b8 | 783 | ieee80211_has_protected(fc) && |
152e585d | 784 | !(rx_stats->rs_status & |
846d9363 FF |
785 | (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | |
786 | ATH9K_RXERR_KEYMISS)); | |
66760eac | 787 | |
f88373fa FF |
788 | /* |
789 | * Key miss events are only relevant for pairwise keys where the | |
790 | * descriptor does contain a valid key index. This has been observed | |
791 | * mostly with CCMP encryption. | |
792 | */ | |
bed3d9c0 FF |
793 | if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID || |
794 | !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) | |
f88373fa FF |
795 | rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; |
796 | ||
66760eac FF |
797 | mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && |
798 | !ieee80211_has_morefrags(fc) && | |
799 | !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && | |
800 | (rx_stats->rs_status & ATH9K_RXERR_MIC); | |
801 | ||
d435700f S |
802 | /* |
803 | * The rx_stats->rs_status will not be set until the end of the | |
804 | * chained descriptors so it can be ignored if rs_more is set. The | |
805 | * rs_more will be false at the last element of the chained | |
806 | * descriptors. | |
807 | */ | |
808 | if (rx_stats->rs_status != 0) { | |
846d9363 FF |
809 | u8 status_mask; |
810 | ||
66760eac | 811 | if (rx_stats->rs_status & ATH9K_RXERR_CRC) { |
d435700f | 812 | rxs->flag |= RX_FLAG_FAILED_FCS_CRC; |
66760eac FF |
813 | mic_error = false; |
814 | } | |
d435700f | 815 | |
846d9363 FF |
816 | if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || |
817 | (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { | |
d435700f | 818 | *decrypt_error = true; |
66760eac | 819 | mic_error = false; |
d435700f | 820 | } |
66760eac | 821 | |
d435700f S |
822 | /* |
823 | * Reject error frames with the exception of | |
824 | * decryption and MIC failures. For monitor mode, | |
825 | * we also ignore the CRC error. | |
826 | */ | |
846d9363 FF |
827 | status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | |
828 | ATH9K_RXERR_KEYMISS; | |
829 | ||
ec205999 | 830 | if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL)) |
846d9363 FF |
831 | status_mask |= ATH9K_RXERR_CRC; |
832 | ||
833 | if (rx_stats->rs_status & ~status_mask) | |
834 | return false; | |
d435700f | 835 | } |
66760eac FF |
836 | |
837 | /* | |
838 | * For unicast frames the MIC error bit can have false positives, | |
839 | * so all MIC error reports need to be validated in software. | |
840 | * False negatives are not common, so skip software verification | |
841 | * if the hardware considers the MIC valid. | |
842 | */ | |
843 | if (strip_mic) | |
844 | rxs->flag |= RX_FLAG_MMIC_STRIPPED; | |
845 | else if (is_mc && mic_error) | |
846 | rxs->flag |= RX_FLAG_MMIC_ERROR; | |
847 | ||
d435700f S |
848 | return true; |
849 | } | |
850 | ||
851 | static int ath9k_process_rate(struct ath_common *common, | |
852 | struct ieee80211_hw *hw, | |
853 | struct ath_rx_status *rx_stats, | |
9f167f64 | 854 | struct ieee80211_rx_status *rxs) |
d435700f S |
855 | { |
856 | struct ieee80211_supported_band *sband; | |
857 | enum ieee80211_band band; | |
858 | unsigned int i = 0; | |
990e08a0 | 859 | struct ath_softc __maybe_unused *sc = common->priv; |
ff9a93f2 | 860 | struct ath_hw *ah = sc->sc_ah; |
d435700f | 861 | |
ff9a93f2 | 862 | band = ah->curchan->chan->band; |
d435700f S |
863 | sband = hw->wiphy->bands[band]; |
864 | ||
ff9a93f2 | 865 | if (IS_CHAN_QUARTER_RATE(ah->curchan)) |
f819c0e7 | 866 | rxs->flag |= RX_FLAG_5MHZ; |
ff9a93f2 | 867 | else if (IS_CHAN_HALF_RATE(ah->curchan)) |
f819c0e7 | 868 | rxs->flag |= RX_FLAG_10MHZ; |
f819c0e7 | 869 | |
d435700f S |
870 | if (rx_stats->rs_rate & 0x80) { |
871 | /* HT rate */ | |
872 | rxs->flag |= RX_FLAG_HT; | |
ab276103 | 873 | rxs->flag |= rx_stats->flag; |
d435700f S |
874 | rxs->rate_idx = rx_stats->rs_rate & 0x7f; |
875 | return 0; | |
876 | } | |
877 | ||
878 | for (i = 0; i < sband->n_bitrates; i++) { | |
879 | if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { | |
880 | rxs->rate_idx = i; | |
881 | return 0; | |
882 | } | |
883 | if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { | |
884 | rxs->flag |= RX_FLAG_SHORTPRE; | |
885 | rxs->rate_idx = i; | |
886 | return 0; | |
887 | } | |
888 | } | |
889 | ||
890 | /* | |
891 | * No valid hardware bitrate found -- we should not get here | |
892 | * because hardware has already validated this frame as OK. | |
893 | */ | |
d2182b69 | 894 | ath_dbg(common, ANY, |
226afe68 JP |
895 | "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", |
896 | rx_stats->rs_rate); | |
15072189 | 897 | RX_STAT_INC(rx_rate_err); |
d435700f S |
898 | return -EINVAL; |
899 | } | |
900 | ||
901 | static void ath9k_process_rssi(struct ath_common *common, | |
902 | struct ieee80211_hw *hw, | |
e3acd13d SM |
903 | struct ath_rx_status *rx_stats, |
904 | struct ieee80211_rx_status *rxs) | |
d435700f | 905 | { |
9ac58615 | 906 | struct ath_softc *sc = hw->priv; |
d435700f | 907 | struct ath_hw *ah = common->ah; |
9fa23e17 | 908 | int last_rssi; |
2ef16755 | 909 | int rssi = rx_stats->rs_rssi; |
e45e91d8 | 910 | int i, j; |
d435700f | 911 | |
e3acd13d SM |
912 | /* |
913 | * RSSI is not available for subframes in an A-MPDU. | |
914 | */ | |
915 | if (rx_stats->rs_moreaggr) { | |
916 | rxs->flag |= RX_FLAG_NO_SIGNAL_VAL; | |
917 | return; | |
918 | } | |
919 | ||
920 | /* | |
921 | * Check if the RSSI for the last subframe in an A-MPDU | |
922 | * or an unaggregated frame is valid. | |
923 | */ | |
924 | if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) { | |
925 | rxs->flag |= RX_FLAG_NO_SIGNAL_VAL; | |
9fa23e17 | 926 | return; |
e3acd13d | 927 | } |
9fa23e17 | 928 | |
e45e91d8 FF |
929 | for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) { |
930 | s8 rssi; | |
931 | ||
932 | if (!(ah->rxchainmask & BIT(i))) | |
933 | continue; | |
934 | ||
935 | rssi = rx_stats->rs_rssi_ctl[i]; | |
936 | if (rssi != ATH9K_RSSI_BAD) { | |
937 | rxs->chains |= BIT(j); | |
938 | rxs->chain_signal[j] = ah->noise + rssi; | |
939 | } | |
940 | j++; | |
941 | } | |
942 | ||
e3acd13d SM |
943 | /* |
944 | * Update Beacon RSSI, this is used by ANI. | |
945 | */ | |
946 | if (rx_stats->is_mybeacon && | |
947 | ((ah->opmode == NL80211_IFTYPE_STATION) || | |
948 | (ah->opmode == NL80211_IFTYPE_ADHOC))) { | |
9ac58615 | 949 | ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); |
e3acd13d SM |
950 | last_rssi = sc->last_rssi; |
951 | ||
952 | if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) | |
953 | rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); | |
954 | if (rssi < 0) | |
955 | rssi = 0; | |
d435700f | 956 | |
e3acd13d SM |
957 | ah->stats.avgbrssi = rssi; |
958 | } | |
d435700f | 959 | |
e3acd13d | 960 | rxs->signal = ah->noise + rx_stats->rs_rssi; |
d435700f S |
961 | } |
962 | ||
e0dd1a96 SM |
963 | static void ath9k_process_tsf(struct ath_rx_status *rs, |
964 | struct ieee80211_rx_status *rxs, | |
965 | u64 tsf) | |
966 | { | |
967 | u32 tsf_lower = tsf & 0xffffffff; | |
968 | ||
969 | rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp; | |
970 | if (rs->rs_tstamp > tsf_lower && | |
971 | unlikely(rs->rs_tstamp - tsf_lower > 0x10000000)) | |
972 | rxs->mactime -= 0x100000000ULL; | |
973 | ||
974 | if (rs->rs_tstamp < tsf_lower && | |
975 | unlikely(tsf_lower - rs->rs_tstamp > 0x10000000)) | |
976 | rxs->mactime += 0x100000000ULL; | |
977 | } | |
978 | ||
d435700f S |
979 | /* |
980 | * For Decrypt or Demic errors, we only mark packet status here and always push | |
981 | * up the frame up to let mac80211 handle the actual error case, be it no | |
982 | * decryption key or real decryption error. This let us keep statistics there. | |
983 | */ | |
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	/* Latch the discard flag left behind by the previous descriptor. */
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	/* 802.11 header starts after the hardware RX status area (EDMA). */
	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		return -EINVAL;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	ath9k_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	/* Count data frames for BT coexistence traffic accounting. */
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	/*
	 * If this corrupt frame is chained (rs_more), the following
	 * descriptor belongs to the same frame and must be discarded too.
	 */
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}
1094 | ||
/*
 * Final fixups for an accepted RX frame before handing it to mac80211:
 * remove the hardware-inserted 802.11 header alignment padding and
 * translate the hardware decryption outcome into mac80211 RX flags.
 */
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ieee80211_hdrlen(fc);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
		/* Shift the header forward over the pad, then drop the pad. */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	/* Valid hw key index + protected frame + no error => hw decrypted. */
	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		/*
		 * No key index from hw: recover it from the IV's KeyID
		 * bits and check it against the software keymap.
		 */
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
b5c80475 | 1145 | |
c3124df7 SM |
1146 | /* |
1147 | * Run the LNA combining algorithm only in these cases: | |
1148 | * | |
1149 | * Standalone WLAN cards with both LNA/Antenna diversity | |
1150 | * enabled in the EEPROM. | |
1151 | * | |
1152 | * WLAN+BT cards which are in the supported card list | |
1153 | * in ath_pci_id_table and the user has loaded the | |
1154 | * driver with "bt_ant_diversity" set to true. | |
1155 | */ | |
1156 | static void ath9k_antenna_check(struct ath_softc *sc, | |
1157 | struct ath_rx_status *rs) | |
1158 | { | |
1159 | struct ath_hw *ah = sc->sc_ah; | |
1160 | struct ath9k_hw_capabilities *pCap = &ah->caps; | |
1161 | struct ath_common *common = ath9k_hw_common(ah); | |
1162 | ||
1163 | if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) | |
1164 | return; | |
1165 | ||
c3124df7 SM |
1166 | /* |
1167 | * Change the default rx antenna if rx diversity | |
1168 | * chooses the other antenna 3 times in a row. | |
1169 | */ | |
1170 | if (sc->rx.defant != rs->rs_antenna) { | |
1171 | if (++sc->rx.rxotherant >= 3) | |
1172 | ath_setdefantenna(sc, rs->rs_antenna); | |
1173 | } else { | |
1174 | sc->rx.rxotherant = 0; | |
1175 | } | |
1176 | ||
1177 | if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) { | |
1178 | if (common->bt_ant_diversity) | |
1179 | ath_ant_comb_scan(sc, rs); | |
1180 | } else { | |
1181 | ath_ant_comb_scan(sc, rs); | |
1182 | } | |
1183 | } | |
1184 | ||
21fbbca3 CL |
1185 | static void ath9k_apply_ampdu_details(struct ath_softc *sc, |
1186 | struct ath_rx_status *rs, struct ieee80211_rx_status *rxs) | |
1187 | { | |
1188 | if (rs->rs_isaggr) { | |
1189 | rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; | |
1190 | ||
1191 | rxs->ampdu_reference = sc->rx.ampdu_ref; | |
1192 | ||
1193 | if (!rs->rs_moreaggr) { | |
1194 | rxs->flag |= RX_FLAG_AMPDU_IS_LAST; | |
1195 | sc->rx.ampdu_ref++; | |
1196 | } | |
1197 | ||
1198 | if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE) | |
1199 | rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR; | |
1200 | } | |
1201 | } | |
1202 | ||
b5c80475 FF |
/*
 * Main RX processing loop, shared by legacy and EDMA chips.
 *
 * Drains completed RX buffers from the hardware queue: validates and
 * post-processes each frame, reassembles chained (rs_more) fragments,
 * hands good frames to mac80211 via ieee80211_rx(), and recycles every
 * buffer back to the hardware with a freshly allocated replacement skb.
 *
 * @sc:    driver instance
 * @flush: non-zero when draining only (buffers are requeued on the
 *         software list but not re-armed in hardware)
 * @hp:    selects the high-priority EDMA queue (EDMA chips only)
 *
 * Returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;

	/* EDMA hardware writes status into the buffer, so map both ways. */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	/* One TSF snapshot for the whole batch; used to extend timestamps. */
	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		/* No more completed buffers: done. */
		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/*
		 * Unmap the frame. Only done after the replacement mapping
		 * succeeded, so the buffer is never left unmapped.
		 */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		/* Swap the fresh buffer into the descriptor. */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		/* Expose the received data; strip the EDMA status header. */
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		/* Final fragment: append its data to the stored first one. */
		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/* Drop the 8-byte Michael MIC that the hardware left in place. */
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		/* Feed power-save state machine while waiting for wakeup frames. */
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);

		/* Ownership of skb passes to mac80211 here. */
		ieee80211_rx(hw, skb);

requeue_drop_frag:
		/* Error path: abandon any partially assembled fragment. */
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		/* When flushing, only collect buffers; do not re-arm RX. */
		if (flush)
			continue;

		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			ath_rx_buf_relink(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	/* Re-enable RXEOL/RXORN interrupts now that buffers are available. */
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}