]>
Commit | Line | Data |
---|---|---|
f078f209 | 1 | /* |
cee075a2 | 2 | * Copyright (c) 2008-2009 Atheros Communications Inc. |
f078f209 LR |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
394cf0a1 | 17 | #include "ath9k.h" |
f078f209 | 18 | |
bce048d7 JM |
19 | static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, |
20 | struct ieee80211_hdr *hdr) | |
21 | { | |
c52f33d0 JM |
22 | struct ieee80211_hw *hw = sc->pri_wiphy->hw; |
23 | int i; | |
24 | ||
25 | spin_lock_bh(&sc->wiphy_lock); | |
26 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
27 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | |
28 | if (aphy == NULL) | |
29 | continue; | |
30 | if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) | |
31 | == 0) { | |
32 | hw = aphy->hw; | |
33 | break; | |
34 | } | |
35 | } | |
36 | spin_unlock_bh(&sc->wiphy_lock); | |
37 | return hw; | |
bce048d7 JM |
38 | } |
39 | ||
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	/*
	 * First buffer after a reset (rxlink == NULL): hand the DMA address
	 * directly to the hardware. Otherwise chain this descriptor onto the
	 * previous one via its link word.
	 */
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	/* Remember where the next descriptor must be linked in. */
	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}
83 | ||
ff37e337 S |
84 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
85 | { | |
86 | /* XXX block beacon interrupts */ | |
87 | ath9k_hw_setantenna(sc->sc_ah, antenna); | |
b77f483f S |
88 | sc->rx.defant = antenna; |
89 | sc->rx.rxotherant = 0; | |
ff37e337 S |
90 | } |
91 | ||
/*
 * Program the hardware for the current operating mode: RX filter,
 * BSSID mask, opmode register, MAC address and multicast filter.
 */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask (only on hardware that supports it) */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter: ~0/~0 accepts
	 * all multicast hash buckets */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
117 | ||
/*
 * Allocate and initialize the RX descriptor ring: sets up nbufs DMA
 * descriptors and attaches a freshly allocated, DMA-mapped skb to each.
 * Returns 0 on success or a negative errno; on failure all partially
 * initialized state is torn down via ath_rx_cleanup().
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	/* Buffer size: max MPDU rounded up to the cache line size
	 * (capped at 64) for DMA-friendly alignment. */
	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				     min(common->cachelsz, (u16)64));

	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "failed to allocate rx descriptors: %d\n", error);
		goto err;
	}

	/* Attach an skb and its DMA mapping to every descriptor buffer. */
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			/* Mapping failed: free the skb and clear the stale
			 * pointer before bailing out. */
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;
	}
	sc->rx.rxlink = NULL;

err:
	/* Fall-through on success (error == 0) skips the cleanup. */
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
175 | ||
f078f209 LR |
176 | void ath_rx_cleanup(struct ath_softc *sc) |
177 | { | |
cc861f74 LR |
178 | struct ath_hw *ah = sc->sc_ah; |
179 | struct ath_common *common = ath9k_hw_common(ah); | |
f078f209 LR |
180 | struct sk_buff *skb; |
181 | struct ath_buf *bf; | |
182 | ||
b77f483f | 183 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
f078f209 | 184 | skb = bf->bf_mpdu; |
051b9191 | 185 | if (skb) { |
797fe5cb | 186 | dma_unmap_single(sc->dev, bf->bf_buf_addr, |
cc861f74 | 187 | common->rx_bufsize, DMA_FROM_DEVICE); |
f078f209 | 188 | dev_kfree_skb(skb); |
051b9191 | 189 | } |
f078f209 LR |
190 | } |
191 | ||
b77f483f S |
192 | if (sc->rx.rxdma.dd_desc_len != 0) |
193 | ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); | |
f078f209 LR |
194 | } |
195 | ||
196 | /* | |
197 | * Calculate the receive filter according to the | |
198 | * operating mode and state: | |
199 | * | |
200 | * o always accept unicast, broadcast, and multicast traffic | |
201 | * o maintain current state of phy error reception (the hal | |
202 | * may enable phy error frames for noise immunity work) | |
203 | * o probe request frames are accepted only when operating in | |
204 | * hostap, adhoc, or monitor modes | |
205 | * o enable promiscuous mode according to the interface state | |
206 | * o accept beacons: | |
207 | * - when operating in adhoc mode so the 802.11 layer creates | |
208 | * node table entries for peers, | |
209 | * - when operating in station mode for collecting rssi data when | |
210 | * the station is otherwise quiet, or | |
211 | * - when operating as a repeater so we see repeater-sta beacons | |
212 | * - when scanning | |
213 | */ | |
214 | ||
215 | u32 ath_calcrxfilter(struct ath_softc *sc) | |
216 | { | |
217 | #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) | |
7dcfdcd9 | 218 | |
f078f209 LR |
219 | u32 rfilt; |
220 | ||
221 | rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) | |
222 | | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST | |
223 | | ATH9K_RX_FILTER_MCAST; | |
224 | ||
225 | /* If not a STA, enable processing of Probe Requests */ | |
2660b81a | 226 | if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) |
f078f209 LR |
227 | rfilt |= ATH9K_RX_FILTER_PROBEREQ; |
228 | ||
217ba9da JM |
229 | /* |
230 | * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station | |
231 | * mode interface or when in monitor mode. AP mode does not need this | |
232 | * since it receives all in-BSS frames anyway. | |
233 | */ | |
2660b81a | 234 | if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && |
b77f483f | 235 | (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || |
217ba9da | 236 | (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) |
f078f209 | 237 | rfilt |= ATH9K_RX_FILTER_PROM; |
f078f209 | 238 | |
d42c6b71 S |
239 | if (sc->rx.rxfilter & FIF_CONTROL) |
240 | rfilt |= ATH9K_RX_FILTER_CONTROL; | |
241 | ||
dbaaa147 VT |
242 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && |
243 | !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) | |
244 | rfilt |= ATH9K_RX_FILTER_MYBEACON; | |
245 | else | |
f078f209 LR |
246 | rfilt |= ATH9K_RX_FILTER_BEACON; |
247 | ||
66afad01 SB |
248 | if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) || |
249 | AR_SREV_9285_10_OR_LATER(sc->sc_ah)) && | |
250 | (sc->sc_ah->opmode == NL80211_IFTYPE_AP) && | |
251 | (sc->rx.rxfilter & FIF_PSPOLL)) | |
dbaaa147 | 252 | rfilt |= ATH9K_RX_FILTER_PSPOLL; |
be0418ad | 253 | |
7ea310be S |
254 | if (conf_is_ht(&sc->hw->conf)) |
255 | rfilt |= ATH9K_RX_FILTER_COMP_BAR; | |
256 | ||
5eb6ba83 | 257 | if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) { |
b93bce2a JM |
258 | /* TODO: only needed if more than one BSSID is in use in |
259 | * station/adhoc mode */ | |
5eb6ba83 JC |
260 | /* The following may also be needed for other older chips */ |
261 | if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) | |
262 | rfilt |= ATH9K_RX_FILTER_PROM; | |
b93bce2a JM |
263 | rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; |
264 | } | |
265 | ||
f078f209 | 266 | return rfilt; |
7dcfdcd9 | 267 | |
f078f209 LR |
268 | #undef RX_FILTER_PRESERVE |
269 | } | |
270 | ||
/*
 * (Re)start reception: relink every buffer in the RX list into a fresh
 * descriptor chain, point the hardware at the first one and enable the
 * PCU receive engine. Always returns 0.
 */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	/* Reset the chain tail so the first ath_rx_buf_link() call
	 * programs the hardware head pointer. */
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	/* Program filters/opmode, then start the PCU receive engine. */
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}
300 | ||
/*
 * Stop reception: disable the PCU receive engine, clear the RX filter,
 * then stop RX DMA. Returns true if the DMA engine actually stopped.
 * NOTE(review): call order (PCU first, then filter, then DMA) is
 * preserved as-is — presumably required by the hardware; confirm
 * before reordering.
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	/* Forget the descriptor chain tail; ath_startrecv rebuilds it. */
	sc->rx.rxlink = NULL;

	return stopped;
}
313 | ||
/*
 * Drain all pending RX buffers without delivering them: the RXFLUSH
 * flag makes the tasklet requeue frames instead of processing them.
 * The flag is set and cleared under rxflushlock so a concurrent flush
 * cannot interleave.
 */
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
322 | ||
cc65965c JM |
323 | static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) |
324 | { | |
325 | /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ | |
326 | struct ieee80211_mgmt *mgmt; | |
327 | u8 *pos, *end, id, elen; | |
328 | struct ieee80211_tim_ie *tim; | |
329 | ||
330 | mgmt = (struct ieee80211_mgmt *)skb->data; | |
331 | pos = mgmt->u.beacon.variable; | |
332 | end = skb->data + skb->len; | |
333 | ||
334 | while (pos + 2 < end) { | |
335 | id = *pos++; | |
336 | elen = *pos++; | |
337 | if (pos + elen > end) | |
338 | break; | |
339 | ||
340 | if (id == WLAN_EID_TIM) { | |
341 | if (elen < sizeof(*tim)) | |
342 | break; | |
343 | tim = (struct ieee80211_tim_ie *) pos; | |
344 | if (tim->dtim_count != 0) | |
345 | break; | |
346 | return tim->bitmap_ctrl & 0x01; | |
347 | } | |
348 | ||
349 | pos += elen; | |
350 | } | |
351 | ||
352 | return false; | |
353 | } | |
354 | ||
/*
 * Power-save handling for a received beacon from our AP: clears the
 * wait-for-beacon state, optionally resyncs beacon timers, and decides
 * whether to stay awake for buffered broadcast/multicast (CAB) frames.
 */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	/* Minimum beacon: 24-byte mgmt header + 8-byte timestamp +
	 * 2-byte beacon interval + 2-byte capability info. */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	/* First beacon after association/wakeup: resync beacon timers
	 * with the AP's timestamp. */
	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}
402 | ||
/*
 * Power-save RX dispatch: while any PS wait flag is set, classify the
 * received frame and clear the matching wait state (beacon, end of CAB
 * delivery, or PS-Poll response data).
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		/* Unicast, unfragmented frame: the PS-Poll exchange is
		 * complete, so we can go back to sleep. */
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%x)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}
}
439 | ||
b4afffc0 LR |
440 | static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, |
441 | struct ath_softc *sc, struct sk_buff *skb, | |
5ca42627 | 442 | struct ieee80211_rx_status *rxs) |
9d64a3cf JM |
443 | { |
444 | struct ieee80211_hdr *hdr; | |
445 | ||
446 | hdr = (struct ieee80211_hdr *)skb->data; | |
447 | ||
448 | /* Send the frame to mac80211 */ | |
449 | if (is_multicast_ether_addr(hdr->addr1)) { | |
450 | int i; | |
451 | /* | |
452 | * Deliver broadcast/multicast frames to all suitable | |
453 | * virtual wiphys. | |
454 | */ | |
455 | /* TODO: filter based on channel configuration */ | |
456 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
457 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | |
458 | struct sk_buff *nskb; | |
459 | if (aphy == NULL) | |
460 | continue; | |
461 | nskb = skb_copy(skb, GFP_ATOMIC); | |
5ca42627 LR |
462 | if (!nskb) |
463 | continue; | |
464 | ieee80211_rx(aphy->hw, nskb); | |
9d64a3cf | 465 | } |
f1d58c25 | 466 | ieee80211_rx(sc->hw, skb); |
5ca42627 | 467 | } else |
9d64a3cf | 468 | /* Deliver unicast frames based on receiver address */ |
b4afffc0 | 469 | ieee80211_rx(hw, skb); |
9d64a3cf JM |
470 | } |
471 | ||
/*
 * Main RX processing loop. Walks completed descriptors at the head of
 * the rx buffer list, delivers each frame to mac80211 (after preprocess
 * and power-save handling), attaches a fresh skb to the buffer and
 * relinks it at the tail for the hardware to reuse. With flush != 0,
 * frames are requeued without being processed. Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
/* Translate a descriptor's physical address back to its virtual one
 * within the descriptor DMA region. */
#define PA2DESC(_sc, _pa)                                               \
	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +		\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ath_rx_status *rx_stats;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;

	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
				sc->rx.rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Synchronize the DMA transfer with CPU before
		 * 1. accessing the frame
		 * 2. requeueing the same buffer to h/w
		 */
		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_FROM_DEVICE);

		hdr = (struct ieee80211_hdr *) skb->data;
		rxs = IEEE80211_SKB_RXCB(skb);

		/* Pick the (virtual) wiphy this frame belongs to. */
		hw = ath_get_virt_hw(sc, hdr);
		rx_stats = &ds->ds_rxstat;

		ath_debug_stat_rx(sc, bf);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
						     rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 DMA_FROM_DEVICE);

		skb_put(skb, rx_stats->rs_datalen);

		ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
					     rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			/* Mapping failed: deliver the old skb anyway and
			 * abort the loop with the buffer unarmed. */
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rx_stats->rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		/* Run power-save bookkeeping only while some PS wait
		 * state is pending. */
		if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
					     PS_WAIT_FOR_CAB |
					     PS_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		/* Move the buffer to the tail and relink it so the
		 * hardware can fill it again. */
		list_move_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
#undef PA2DESC
}