]>
Commit | Line | Data |
---|---|---|
8ca21f01 JM |
1 | /* |
2 | * Copyright (c) 2008-2009 Atheros Communications Inc. | |
3 | * | |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
5a0e3ad6 TH |
17 | #include <linux/slab.h> |
18 | ||
8ca21f01 JM |
19 | #include "ath9k.h" |
20 | ||
/* Accumulator used when iterating active interfaces to collect MAC addresses. */
struct ath9k_vif_iter_data {
	int count;	/* number of ETH_ALEN-sized addresses stored in @addr */
	u8 *addr;	/* packed array of MAC addresses (count * ETH_ALEN bytes) */
};
25 | ||
26 | static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | |
27 | { | |
28 | struct ath9k_vif_iter_data *iter_data = data; | |
29 | u8 *nbuf; | |
30 | ||
31 | nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN, | |
32 | GFP_ATOMIC); | |
33 | if (nbuf == NULL) | |
34 | return; | |
35 | ||
36 | memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN); | |
37 | iter_data->addr = nbuf; | |
38 | iter_data->count++; | |
39 | } | |
40 | ||
41 | void ath9k_set_bssid_mask(struct ieee80211_hw *hw) | |
42 | { | |
bce048d7 JM |
43 | struct ath_wiphy *aphy = hw->priv; |
44 | struct ath_softc *sc = aphy->sc; | |
1510718d | 45 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
8ca21f01 JM |
46 | struct ath9k_vif_iter_data iter_data; |
47 | int i, j; | |
48 | u8 mask[ETH_ALEN]; | |
49 | ||
50 | /* | |
51 | * Add primary MAC address even if it is not in active use since it | |
52 | * will be configured to the hardware as the starting point and the | |
53 | * BSSID mask will need to be changed if another address is active. | |
54 | */ | |
55 | iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC); | |
56 | if (iter_data.addr) { | |
1510718d | 57 | memcpy(iter_data.addr, common->macaddr, ETH_ALEN); |
8ca21f01 JM |
58 | iter_data.count = 1; |
59 | } else | |
60 | iter_data.count = 0; | |
61 | ||
62 | /* Get list of all active MAC addresses */ | |
c52f33d0 JM |
63 | spin_lock_bh(&sc->wiphy_lock); |
64 | ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, | |
8ca21f01 | 65 | &iter_data); |
c52f33d0 JM |
66 | for (i = 0; i < sc->num_sec_wiphy; i++) { |
67 | if (sc->sec_wiphy[i] == NULL) | |
68 | continue; | |
69 | ieee80211_iterate_active_interfaces_atomic( | |
70 | sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data); | |
71 | } | |
72 | spin_unlock_bh(&sc->wiphy_lock); | |
8ca21f01 JM |
73 | |
74 | /* Generate an address mask to cover all active addresses */ | |
75 | memset(mask, 0, ETH_ALEN); | |
76 | for (i = 0; i < iter_data.count; i++) { | |
77 | u8 *a1 = iter_data.addr + i * ETH_ALEN; | |
78 | for (j = i + 1; j < iter_data.count; j++) { | |
79 | u8 *a2 = iter_data.addr + j * ETH_ALEN; | |
80 | mask[0] |= a1[0] ^ a2[0]; | |
81 | mask[1] |= a1[1] ^ a2[1]; | |
82 | mask[2] |= a1[2] ^ a2[2]; | |
83 | mask[3] |= a1[3] ^ a2[3]; | |
84 | mask[4] |= a1[4] ^ a2[4]; | |
85 | mask[5] |= a1[5] ^ a2[5]; | |
86 | } | |
87 | } | |
88 | ||
89 | kfree(iter_data.addr); | |
90 | ||
91 | /* Invert the mask and configure hardware */ | |
1510718d LR |
92 | common->bssidmask[0] = ~mask[0]; |
93 | common->bssidmask[1] = ~mask[1]; | |
94 | common->bssidmask[2] = ~mask[2]; | |
95 | common->bssidmask[3] = ~mask[3]; | |
96 | common->bssidmask[4] = ~mask[4]; | |
97 | common->bssidmask[5] = ~mask[5]; | |
8ca21f01 | 98 | |
13b81559 | 99 | ath_hw_setbssidmask(common); |
8ca21f01 | 100 | } |
c52f33d0 JM |
101 | |
102 | int ath9k_wiphy_add(struct ath_softc *sc) | |
103 | { | |
104 | int i, error; | |
105 | struct ath_wiphy *aphy; | |
1510718d | 106 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
c52f33d0 JM |
107 | struct ieee80211_hw *hw; |
108 | u8 addr[ETH_ALEN]; | |
109 | ||
110 | hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops); | |
111 | if (hw == NULL) | |
112 | return -ENOMEM; | |
113 | ||
114 | spin_lock_bh(&sc->wiphy_lock); | |
115 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
116 | if (sc->sec_wiphy[i] == NULL) | |
117 | break; | |
118 | } | |
119 | ||
120 | if (i == sc->num_sec_wiphy) { | |
121 | /* No empty slot available; increase array length */ | |
122 | struct ath_wiphy **n; | |
123 | n = krealloc(sc->sec_wiphy, | |
124 | (sc->num_sec_wiphy + 1) * | |
125 | sizeof(struct ath_wiphy *), | |
126 | GFP_ATOMIC); | |
127 | if (n == NULL) { | |
128 | spin_unlock_bh(&sc->wiphy_lock); | |
129 | ieee80211_free_hw(hw); | |
130 | return -ENOMEM; | |
131 | } | |
132 | n[i] = NULL; | |
133 | sc->sec_wiphy = n; | |
134 | sc->num_sec_wiphy++; | |
135 | } | |
136 | ||
137 | SET_IEEE80211_DEV(hw, sc->dev); | |
138 | ||
139 | aphy = hw->priv; | |
140 | aphy->sc = sc; | |
141 | aphy->hw = hw; | |
142 | sc->sec_wiphy[i] = aphy; | |
143 | spin_unlock_bh(&sc->wiphy_lock); | |
144 | ||
1510718d | 145 | memcpy(addr, common->macaddr, ETH_ALEN); |
c52f33d0 JM |
146 | addr[0] |= 0x02; /* Locally managed address */ |
147 | /* | |
148 | * XOR virtual wiphy index into the least significant bits to generate | |
149 | * a different MAC address for each virtual wiphy. | |
150 | */ | |
151 | addr[5] ^= i & 0xff; | |
152 | addr[4] ^= (i & 0xff00) >> 8; | |
153 | addr[3] ^= (i & 0xff0000) >> 16; | |
154 | ||
155 | SET_IEEE80211_PERM_ADDR(hw, addr); | |
156 | ||
285f2dda | 157 | ath9k_set_hw_capab(sc, hw); |
c52f33d0 JM |
158 | |
159 | error = ieee80211_register_hw(hw); | |
160 | ||
f98c3bd2 JM |
161 | if (error == 0) { |
162 | /* Make sure wiphy scheduler is started (if enabled) */ | |
163 | ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int); | |
164 | } | |
165 | ||
c52f33d0 JM |
166 | return error; |
167 | } | |
168 | ||
169 | int ath9k_wiphy_del(struct ath_wiphy *aphy) | |
170 | { | |
171 | struct ath_softc *sc = aphy->sc; | |
172 | int i; | |
173 | ||
174 | spin_lock_bh(&sc->wiphy_lock); | |
175 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
176 | if (aphy == sc->sec_wiphy[i]) { | |
177 | sc->sec_wiphy[i] = NULL; | |
178 | spin_unlock_bh(&sc->wiphy_lock); | |
179 | ieee80211_unregister_hw(aphy->hw); | |
180 | ieee80211_free_hw(aphy->hw); | |
181 | return 0; | |
182 | } | |
183 | } | |
184 | spin_unlock_bh(&sc->wiphy_lock); | |
185 | return -ENOENT; | |
186 | } | |
f0ed85c6 JM |
187 | |
188 | static int ath9k_send_nullfunc(struct ath_wiphy *aphy, | |
189 | struct ieee80211_vif *vif, const u8 *bssid, | |
190 | int ps) | |
191 | { | |
192 | struct ath_softc *sc = aphy->sc; | |
193 | struct ath_tx_control txctl; | |
194 | struct sk_buff *skb; | |
195 | struct ieee80211_hdr *hdr; | |
196 | __le16 fc; | |
197 | struct ieee80211_tx_info *info; | |
198 | ||
199 | skb = dev_alloc_skb(24); | |
200 | if (skb == NULL) | |
201 | return -ENOMEM; | |
202 | hdr = (struct ieee80211_hdr *) skb_put(skb, 24); | |
203 | memset(hdr, 0, 24); | |
204 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | | |
205 | IEEE80211_FCTL_TODS); | |
206 | if (ps) | |
207 | fc |= cpu_to_le16(IEEE80211_FCTL_PM); | |
208 | hdr->frame_control = fc; | |
209 | memcpy(hdr->addr1, bssid, ETH_ALEN); | |
210 | memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN); | |
211 | memcpy(hdr->addr3, bssid, ETH_ALEN); | |
212 | ||
213 | info = IEEE80211_SKB_CB(skb); | |
214 | memset(info, 0, sizeof(*info)); | |
215 | info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS; | |
216 | info->control.vif = vif; | |
217 | info->control.rates[0].idx = 0; | |
218 | info->control.rates[0].count = 4; | |
219 | info->control.rates[1].idx = -1; | |
220 | ||
221 | memset(&txctl, 0, sizeof(struct ath_tx_control)); | |
222 | txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]]; | |
223 | txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE; | |
224 | ||
225 | if (ath_tx_start(aphy->hw, skb, &txctl) != 0) | |
226 | goto exit; | |
227 | ||
228 | return 0; | |
229 | exit: | |
230 | dev_kfree_skb_any(skb); | |
231 | return -1; | |
232 | } | |
233 | ||
0e2dedf9 JM |
234 | static bool __ath9k_wiphy_pausing(struct ath_softc *sc) |
235 | { | |
236 | int i; | |
237 | if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING) | |
238 | return true; | |
239 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
240 | if (sc->sec_wiphy[i] && | |
241 | sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING) | |
242 | return true; | |
243 | } | |
244 | return false; | |
245 | } | |
246 | ||
247 | static bool ath9k_wiphy_pausing(struct ath_softc *sc) | |
248 | { | |
249 | bool ret; | |
250 | spin_lock_bh(&sc->wiphy_lock); | |
251 | ret = __ath9k_wiphy_pausing(sc); | |
252 | spin_unlock_bh(&sc->wiphy_lock); | |
253 | return ret; | |
254 | } | |
255 | ||
8089cc47 JM |
256 | static bool __ath9k_wiphy_scanning(struct ath_softc *sc) |
257 | { | |
258 | int i; | |
259 | if (sc->pri_wiphy->state == ATH_WIPHY_SCAN) | |
260 | return true; | |
261 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
262 | if (sc->sec_wiphy[i] && | |
263 | sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN) | |
264 | return true; | |
265 | } | |
266 | return false; | |
267 | } | |
268 | ||
269 | bool ath9k_wiphy_scanning(struct ath_softc *sc) | |
270 | { | |
271 | bool ret; | |
272 | spin_lock_bh(&sc->wiphy_lock); | |
273 | ret = __ath9k_wiphy_scanning(sc); | |
274 | spin_unlock_bh(&sc->wiphy_lock); | |
275 | return ret; | |
276 | } | |
277 | ||
0e2dedf9 JM |
278 | static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy); |
279 | ||
280 | /* caller must hold wiphy_lock */ | |
281 | static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy) | |
282 | { | |
283 | if (aphy == NULL) | |
284 | return; | |
285 | if (aphy->chan_idx != aphy->sc->chan_idx) | |
286 | return; /* wiphy not on the selected channel */ | |
287 | __ath9k_wiphy_unpause(aphy); | |
288 | } | |
289 | ||
290 | static void ath9k_wiphy_unpause_channel(struct ath_softc *sc) | |
291 | { | |
292 | int i; | |
293 | spin_lock_bh(&sc->wiphy_lock); | |
294 | __ath9k_wiphy_unpause_ch(sc->pri_wiphy); | |
295 | for (i = 0; i < sc->num_sec_wiphy; i++) | |
296 | __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]); | |
297 | spin_unlock_bh(&sc->wiphy_lock); | |
298 | } | |
299 | ||
/*
 * Deferred worker that completes a virtual-wiphy channel change once all
 * interfaces have been paused: retunes the hardware to sc->next_wiphy's
 * channel under sc->mutex, then unpauses the wiphys on that channel.
 */
void ath9k_wiphy_chan_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_wiphy *aphy = sc->next_wiphy;

	if (aphy == NULL)
		return;

	/*
	 * All pending interfaces paused; ready to change
	 * channels.
	 */

	/* Change channels */
	mutex_lock(&sc->mutex);
	/* XXX: remove me eventually */
	ath9k_update_ichannel(sc, aphy->hw,
			      &sc->sc_ah->channels[sc->chan_idx]);

	/* sync hw configuration for hw code */
	common->hw = aphy->hw;

	ath_update_chainmask(sc, sc->chan_is_ht);
	if (ath_set_channel(sc, aphy->hw,
			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
		/* Channel change failed: leave wiphys paused and bail out. */
		printk(KERN_DEBUG "ath9k: Failed to set channel for new "
		       "virtual wiphy\n");
		mutex_unlock(&sc->mutex);
		return;
	}
	mutex_unlock(&sc->mutex);

	/* Resume TX on the wiphys parked on the newly selected channel. */
	ath9k_wiphy_unpause_channel(sc);
}
335 | ||
f0ed85c6 JM |
336 | /* |
337 | * ath9k version of ieee80211_tx_status() for TX frames that are generated | |
338 | * internally in the driver. | |
339 | */ | |
/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	/*
	 * Completion of an internally generated pause (PS nullfunc) frame
	 * while this wiphy is waiting to pause: mark it paused and, once no
	 * wiphy is still pausing, kick off the deferred channel change.
	 */
	if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
	    aphy->state == ATH_WIPHY_PAUSING) {
		if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
			       "frame\n", wiphy_name(hw->wiphy));
			/*
			 * The AP did not reply; ignore this to allow us to
			 * continue.
			 */
		}
		aphy->state = ATH_WIPHY_PAUSED;
		if (!ath9k_wiphy_pausing(aphy->sc)) {
			/*
			 * Drop from tasklet to work to allow mutex for channel
			 * change.
			 */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
		}
	}

	dev_kfree_skb(skb);
}
368 | ||
0e2dedf9 JM |
369 | static void ath9k_mark_paused(struct ath_wiphy *aphy) |
370 | { | |
371 | struct ath_softc *sc = aphy->sc; | |
372 | aphy->state = ATH_WIPHY_PAUSED; | |
373 | if (!__ath9k_wiphy_pausing(sc)) | |
42935eca | 374 | ieee80211_queue_work(sc->hw, &sc->chan_work); |
0e2dedf9 JM |
375 | } |
376 | ||
f0ed85c6 JM |
377 | static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) |
378 | { | |
379 | struct ath_wiphy *aphy = data; | |
380 | struct ath_vif *avp = (void *) vif->drv_priv; | |
381 | ||
382 | switch (vif->type) { | |
383 | case NL80211_IFTYPE_STATION: | |
384 | if (!vif->bss_conf.assoc) { | |
0e2dedf9 | 385 | ath9k_mark_paused(aphy); |
f0ed85c6 JM |
386 | break; |
387 | } | |
388 | /* TODO: could avoid this if already in PS mode */ | |
0e2dedf9 JM |
389 | if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) { |
390 | printk(KERN_DEBUG "%s: failed to send PS nullfunc\n", | |
391 | __func__); | |
392 | ath9k_mark_paused(aphy); | |
393 | } | |
f0ed85c6 JM |
394 | break; |
395 | case NL80211_IFTYPE_AP: | |
396 | /* Beacon transmission is paused by aphy->state change */ | |
0e2dedf9 | 397 | ath9k_mark_paused(aphy); |
f0ed85c6 JM |
398 | break; |
399 | default: | |
400 | break; | |
401 | } | |
402 | } | |
403 | ||
/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	/* Stop TX first, then flip state so completions see PAUSING. */
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}
418 | ||
419 | int ath9k_wiphy_pause(struct ath_wiphy *aphy) | |
420 | { | |
421 | int ret; | |
422 | spin_lock_bh(&aphy->sc->wiphy_lock); | |
423 | ret = __ath9k_wiphy_pause(aphy); | |
424 | spin_unlock_bh(&aphy->sc->wiphy_lock); | |
425 | return ret; | |
426 | } | |
427 | ||
428 | static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | |
429 | { | |
430 | struct ath_wiphy *aphy = data; | |
431 | struct ath_vif *avp = (void *) vif->drv_priv; | |
432 | ||
433 | switch (vif->type) { | |
434 | case NL80211_IFTYPE_STATION: | |
435 | if (!vif->bss_conf.assoc) | |
436 | break; | |
437 | ath9k_send_nullfunc(aphy, vif, avp->bssid, 0); | |
438 | break; | |
439 | case NL80211_IFTYPE_AP: | |
440 | /* Beacon transmission is re-enabled by aphy->state change */ | |
441 | break; | |
442 | default: | |
443 | break; | |
444 | } | |
445 | } | |
446 | ||
/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	/* Tell APs/peers first, then mark active and restart TX queues. */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw,
						   ath9k_unpause_iter, aphy);
	aphy->state = ATH_WIPHY_ACTIVE;
	ieee80211_wake_queues(aphy->hw);
	return 0;
}
456 | ||
457 | int ath9k_wiphy_unpause(struct ath_wiphy *aphy) | |
458 | { | |
459 | int ret; | |
460 | spin_lock_bh(&aphy->sc->wiphy_lock); | |
461 | ret = __ath9k_wiphy_unpause(aphy); | |
462 | spin_unlock_bh(&aphy->sc->wiphy_lock); | |
463 | return ret; | |
464 | } | |
0e2dedf9 | 465 | |
7ec3e514 JM |
466 | static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc) |
467 | { | |
468 | int i; | |
469 | if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) | |
470 | sc->pri_wiphy->state = ATH_WIPHY_PAUSED; | |
471 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
472 | if (sc->sec_wiphy[i] && | |
473 | sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) | |
474 | sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED; | |
475 | } | |
476 | } | |
477 | ||
0e2dedf9 JM |
478 | /* caller must hold wiphy_lock */ |
479 | static void __ath9k_wiphy_pause_all(struct ath_softc *sc) | |
480 | { | |
481 | int i; | |
482 | if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) | |
483 | __ath9k_wiphy_pause(sc->pri_wiphy); | |
484 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
485 | if (sc->sec_wiphy[i] && | |
486 | sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE) | |
487 | __ath9k_wiphy_pause(sc->sec_wiphy[i]); | |
488 | } | |
489 | } | |
490 | ||
/*
 * Request that the shared radio be switched to @aphy's channel.  Pauses all
 * other active wiphys and either triggers the channel change immediately
 * (if everything pauses synchronously) or defers it to ath9k_tx_status()
 * once the last pause handshake completes.  Returns 0 on success, -EBUSY
 * when a scan or a previous select is still in progress.
 */
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		/* Track how long the previous select has been stuck. */
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
		{
			/* Stuck for > 500 ms: force-recover the hardware. */
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * tricker radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}
9580a222 JM |
559 | |
560 | bool ath9k_wiphy_started(struct ath_softc *sc) | |
561 | { | |
562 | int i; | |
563 | spin_lock_bh(&sc->wiphy_lock); | |
564 | if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) { | |
565 | spin_unlock_bh(&sc->wiphy_lock); | |
566 | return true; | |
567 | } | |
568 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
569 | if (sc->sec_wiphy[i] && | |
570 | sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) { | |
571 | spin_unlock_bh(&sc->wiphy_lock); | |
572 | return true; | |
573 | } | |
574 | } | |
575 | spin_unlock_bh(&sc->wiphy_lock); | |
576 | return false; | |
577 | } | |
18eb62f8 JM |
578 | |
579 | static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy, | |
580 | struct ath_wiphy *selected) | |
581 | { | |
8089cc47 JM |
582 | if (selected->state == ATH_WIPHY_SCAN) { |
583 | if (aphy == selected) | |
584 | return; | |
585 | /* | |
586 | * Pause all other wiphys for the duration of the scan even if | |
587 | * they are on the current channel now. | |
588 | */ | |
589 | } else if (aphy->chan_idx == selected->chan_idx) | |
18eb62f8 JM |
590 | return; |
591 | aphy->state = ATH_WIPHY_PAUSED; | |
592 | ieee80211_stop_queues(aphy->hw); | |
593 | } | |
594 | ||
595 | void ath9k_wiphy_pause_all_forced(struct ath_softc *sc, | |
596 | struct ath_wiphy *selected) | |
597 | { | |
598 | int i; | |
599 | spin_lock_bh(&sc->wiphy_lock); | |
600 | if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) | |
601 | ath9k_wiphy_pause_chan(sc->pri_wiphy, selected); | |
602 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
603 | if (sc->sec_wiphy[i] && | |
604 | sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE) | |
605 | ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected); | |
606 | } | |
607 | spin_unlock_bh(&sc->wiphy_lock); | |
608 | } | |
f98c3bd2 JM |
609 | |
/*
 * Periodic round-robin wiphy scheduler.  Advances wiphy_scheduler_index
 * through the secondary wiphys (index is 1-based; 0 selects the primary),
 * selects the next non-inactive wiphy, and re-arms itself.
 */
void ath9k_wiphy_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    wiphy_work.work);
	struct ath_wiphy *aphy = NULL;
	bool first = true;

	spin_lock_bh(&sc->wiphy_lock);

	if (sc->wiphy_scheduler_int == 0) {
		/* wiphy scheduler is disabled */
		spin_unlock_bh(&sc->wiphy_lock);
		return;
	}

try_again:
	sc->wiphy_scheduler_index++;
	/* <= because the index is 1-based over sec_wiphy[index - 1]. */
	while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
		aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
		if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
			break;

		sc->wiphy_scheduler_index++;
		aphy = NULL;
	}
	if (aphy == NULL) {
		/* Wrapped past the secondaries: fall back to the primary. */
		sc->wiphy_scheduler_index = 0;
		if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
			if (first) {
				/* Retry once from the start of the list. */
				first = false;
				goto try_again;
			}
			/* No wiphy is ready to be scheduled */
		} else
			aphy = sc->pri_wiphy;
	}

	spin_unlock_bh(&sc->wiphy_lock);

	if (aphy &&
	    aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
	    ath9k_wiphy_select(aphy)) {
		printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
		       "change\n");
	}

	/* Re-arm for the next scheduling interval. */
	ieee80211_queue_delayed_work(sc->hw,
				     &sc->wiphy_work,
				     sc->wiphy_scheduler_int);
}
660 | ||
661 | void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int) | |
662 | { | |
663 | cancel_delayed_work_sync(&sc->wiphy_work); | |
664 | sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int); | |
665 | if (sc->wiphy_scheduler_int) | |
42935eca LR |
666 | ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work, |
667 | sc->wiphy_scheduler_int); | |
f98c3bd2 | 668 | } |
64839170 LR |
669 | |
670 | /* caller must hold wiphy_lock */ | |
671 | bool ath9k_all_wiphys_idle(struct ath_softc *sc) | |
672 | { | |
673 | unsigned int i; | |
194b7c13 | 674 | if (!sc->pri_wiphy->idle) |
64839170 | 675 | return false; |
64839170 LR |
676 | for (i = 0; i < sc->num_sec_wiphy; i++) { |
677 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | |
678 | if (!aphy) | |
679 | continue; | |
194b7c13 | 680 | if (!aphy->idle) |
64839170 LR |
681 | return false; |
682 | } | |
683 | return true; | |
684 | } | |
194b7c13 LR |
685 | |
/* caller must hold wiphy_lock */
/* Record the idle/not-idle state of @aphy and log the transition. */
void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
{
	struct ath_softc *sc = aphy->sc;

	aphy->idle = idle;
	ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
		  "Marking %s as %s\n",
		  wiphy_name(aphy->hw->wiphy),
		  idle ? "idle" : "not-idle");
}
f52de03b LR |
697 | /* Only bother starting a queue on an active virtual wiphy */ |
698 | void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) | |
699 | { | |
700 | struct ieee80211_hw *hw = sc->pri_wiphy->hw; | |
701 | unsigned int i; | |
702 | ||
703 | spin_lock_bh(&sc->wiphy_lock); | |
704 | ||
705 | /* Start the primary wiphy */ | |
706 | if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) { | |
707 | ieee80211_wake_queue(hw, skb_queue); | |
708 | goto unlock; | |
709 | } | |
710 | ||
711 | /* Now start the secondary wiphy queues */ | |
712 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
713 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | |
714 | if (!aphy) | |
715 | continue; | |
716 | if (aphy->state != ATH_WIPHY_ACTIVE) | |
717 | continue; | |
718 | ||
719 | hw = aphy->hw; | |
720 | ieee80211_wake_queue(hw, skb_queue); | |
721 | break; | |
722 | } | |
723 | ||
724 | unlock: | |
725 | spin_unlock_bh(&sc->wiphy_lock); | |
726 | } | |
727 | ||
728 | /* Go ahead and propagate information to all virtual wiphys, it won't hurt */ | |
729 | void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue) | |
730 | { | |
731 | struct ieee80211_hw *hw = sc->pri_wiphy->hw; | |
732 | unsigned int i; | |
733 | ||
734 | spin_lock_bh(&sc->wiphy_lock); | |
735 | ||
736 | /* Stop the primary wiphy */ | |
737 | ieee80211_stop_queue(hw, skb_queue); | |
738 | ||
739 | /* Now stop the secondary wiphy queues */ | |
740 | for (i = 0; i < sc->num_sec_wiphy; i++) { | |
741 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | |
742 | if (!aphy) | |
743 | continue; | |
744 | hw = aphy->hw; | |
745 | ieee80211_stop_queue(hw, skb_queue); | |
746 | } | |
747 | spin_unlock_bh(&sc->wiphy_lock); | |
748 | } |