/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/slab.h>
21 struct ath9k_vif_iter_data
{
26 static void ath9k_vif_iter(void *data
, u8
*mac
, struct ieee80211_vif
*vif
)
28 struct ath9k_vif_iter_data
*iter_data
= data
;
31 nbuf
= krealloc(iter_data
->addr
, (iter_data
->count
+ 1) * ETH_ALEN
,
36 memcpy(nbuf
+ iter_data
->count
* ETH_ALEN
, mac
, ETH_ALEN
);
37 iter_data
->addr
= nbuf
;
41 void ath9k_set_bssid_mask(struct ieee80211_hw
*hw
)
43 struct ath_wiphy
*aphy
= hw
->priv
;
44 struct ath_softc
*sc
= aphy
->sc
;
45 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
46 struct ath9k_vif_iter_data iter_data
;
51 * Add primary MAC address even if it is not in active use since it
52 * will be configured to the hardware as the starting point and the
53 * BSSID mask will need to be changed if another address is active.
55 iter_data
.addr
= kmalloc(ETH_ALEN
, GFP_ATOMIC
);
57 memcpy(iter_data
.addr
, common
->macaddr
, ETH_ALEN
);
62 /* Get list of all active MAC addresses */
63 spin_lock_bh(&sc
->wiphy_lock
);
64 ieee80211_iterate_active_interfaces_atomic(sc
->hw
, ath9k_vif_iter
,
66 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
67 if (sc
->sec_wiphy
[i
] == NULL
)
69 ieee80211_iterate_active_interfaces_atomic(
70 sc
->sec_wiphy
[i
]->hw
, ath9k_vif_iter
, &iter_data
);
72 spin_unlock_bh(&sc
->wiphy_lock
);
74 /* Generate an address mask to cover all active addresses */
75 memset(mask
, 0, ETH_ALEN
);
76 for (i
= 0; i
< iter_data
.count
; i
++) {
77 u8
*a1
= iter_data
.addr
+ i
* ETH_ALEN
;
78 for (j
= i
+ 1; j
< iter_data
.count
; j
++) {
79 u8
*a2
= iter_data
.addr
+ j
* ETH_ALEN
;
80 mask
[0] |= a1
[0] ^ a2
[0];
81 mask
[1] |= a1
[1] ^ a2
[1];
82 mask
[2] |= a1
[2] ^ a2
[2];
83 mask
[3] |= a1
[3] ^ a2
[3];
84 mask
[4] |= a1
[4] ^ a2
[4];
85 mask
[5] |= a1
[5] ^ a2
[5];
89 kfree(iter_data
.addr
);
91 /* Invert the mask and configure hardware */
92 common
->bssidmask
[0] = ~mask
[0];
93 common
->bssidmask
[1] = ~mask
[1];
94 common
->bssidmask
[2] = ~mask
[2];
95 common
->bssidmask
[3] = ~mask
[3];
96 common
->bssidmask
[4] = ~mask
[4];
97 common
->bssidmask
[5] = ~mask
[5];
99 ath_hw_setbssidmask(common
);
102 int ath9k_wiphy_add(struct ath_softc
*sc
)
105 struct ath_wiphy
*aphy
;
106 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
107 struct ieee80211_hw
*hw
;
110 hw
= ieee80211_alloc_hw(sizeof(struct ath_wiphy
), &ath9k_ops
);
114 spin_lock_bh(&sc
->wiphy_lock
);
115 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
116 if (sc
->sec_wiphy
[i
] == NULL
)
120 if (i
== sc
->num_sec_wiphy
) {
121 /* No empty slot available; increase array length */
122 struct ath_wiphy
**n
;
123 n
= krealloc(sc
->sec_wiphy
,
124 (sc
->num_sec_wiphy
+ 1) *
125 sizeof(struct ath_wiphy
*),
128 spin_unlock_bh(&sc
->wiphy_lock
);
129 ieee80211_free_hw(hw
);
137 SET_IEEE80211_DEV(hw
, sc
->dev
);
142 sc
->sec_wiphy
[i
] = aphy
;
143 spin_unlock_bh(&sc
->wiphy_lock
);
145 memcpy(addr
, common
->macaddr
, ETH_ALEN
);
146 addr
[0] |= 0x02; /* Locally managed address */
148 * XOR virtual wiphy index into the least significant bits to generate
149 * a different MAC address for each virtual wiphy.
152 addr
[4] ^= (i
& 0xff00) >> 8;
153 addr
[3] ^= (i
& 0xff0000) >> 16;
155 SET_IEEE80211_PERM_ADDR(hw
, addr
);
157 ath9k_set_hw_capab(sc
, hw
);
159 error
= ieee80211_register_hw(hw
);
162 /* Make sure wiphy scheduler is started (if enabled) */
163 ath9k_wiphy_set_scheduler(sc
, sc
->wiphy_scheduler_int
);
169 int ath9k_wiphy_del(struct ath_wiphy
*aphy
)
171 struct ath_softc
*sc
= aphy
->sc
;
174 spin_lock_bh(&sc
->wiphy_lock
);
175 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
176 if (aphy
== sc
->sec_wiphy
[i
]) {
177 sc
->sec_wiphy
[i
] = NULL
;
178 spin_unlock_bh(&sc
->wiphy_lock
);
179 ieee80211_unregister_hw(aphy
->hw
);
180 ieee80211_free_hw(aphy
->hw
);
184 spin_unlock_bh(&sc
->wiphy_lock
);
188 static int ath9k_send_nullfunc(struct ath_wiphy
*aphy
,
189 struct ieee80211_vif
*vif
, const u8
*bssid
,
192 struct ath_softc
*sc
= aphy
->sc
;
193 struct ath_tx_control txctl
;
195 struct ieee80211_hdr
*hdr
;
197 struct ieee80211_tx_info
*info
;
199 skb
= dev_alloc_skb(24);
202 hdr
= (struct ieee80211_hdr
*) skb_put(skb
, 24);
204 fc
= cpu_to_le16(IEEE80211_FTYPE_DATA
| IEEE80211_STYPE_NULLFUNC
|
205 IEEE80211_FCTL_TODS
);
207 fc
|= cpu_to_le16(IEEE80211_FCTL_PM
);
208 hdr
->frame_control
= fc
;
209 memcpy(hdr
->addr1
, bssid
, ETH_ALEN
);
210 memcpy(hdr
->addr2
, aphy
->hw
->wiphy
->perm_addr
, ETH_ALEN
);
211 memcpy(hdr
->addr3
, bssid
, ETH_ALEN
);
213 info
= IEEE80211_SKB_CB(skb
);
214 memset(info
, 0, sizeof(*info
));
215 info
->flags
= IEEE80211_TX_CTL_REQ_TX_STATUS
;
216 info
->control
.vif
= vif
;
217 info
->control
.rates
[0].idx
= 0;
218 info
->control
.rates
[0].count
= 4;
219 info
->control
.rates
[1].idx
= -1;
221 memset(&txctl
, 0, sizeof(struct ath_tx_control
));
222 txctl
.txq
= &sc
->tx
.txq
[sc
->tx
.hwq_map
[ATH9K_WME_AC_VO
]];
223 txctl
.frame_type
= ps
? ATH9K_INT_PAUSE
: ATH9K_INT_UNPAUSE
;
225 if (ath_tx_start(aphy
->hw
, skb
, &txctl
) != 0)
230 dev_kfree_skb_any(skb
);
234 static bool __ath9k_wiphy_pausing(struct ath_softc
*sc
)
237 if (sc
->pri_wiphy
->state
== ATH_WIPHY_PAUSING
)
239 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
240 if (sc
->sec_wiphy
[i
] &&
241 sc
->sec_wiphy
[i
]->state
== ATH_WIPHY_PAUSING
)
247 static bool ath9k_wiphy_pausing(struct ath_softc
*sc
)
250 spin_lock_bh(&sc
->wiphy_lock
);
251 ret
= __ath9k_wiphy_pausing(sc
);
252 spin_unlock_bh(&sc
->wiphy_lock
);
256 static bool __ath9k_wiphy_scanning(struct ath_softc
*sc
)
259 if (sc
->pri_wiphy
->state
== ATH_WIPHY_SCAN
)
261 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
262 if (sc
->sec_wiphy
[i
] &&
263 sc
->sec_wiphy
[i
]->state
== ATH_WIPHY_SCAN
)
269 bool ath9k_wiphy_scanning(struct ath_softc
*sc
)
272 spin_lock_bh(&sc
->wiphy_lock
);
273 ret
= __ath9k_wiphy_scanning(sc
);
274 spin_unlock_bh(&sc
->wiphy_lock
);
278 static int __ath9k_wiphy_unpause(struct ath_wiphy
*aphy
);
280 /* caller must hold wiphy_lock */
281 static void __ath9k_wiphy_unpause_ch(struct ath_wiphy
*aphy
)
285 if (aphy
->chan_idx
!= aphy
->sc
->chan_idx
)
286 return; /* wiphy not on the selected channel */
287 __ath9k_wiphy_unpause(aphy
);
290 static void ath9k_wiphy_unpause_channel(struct ath_softc
*sc
)
293 spin_lock_bh(&sc
->wiphy_lock
);
294 __ath9k_wiphy_unpause_ch(sc
->pri_wiphy
);
295 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++)
296 __ath9k_wiphy_unpause_ch(sc
->sec_wiphy
[i
]);
297 spin_unlock_bh(&sc
->wiphy_lock
);
300 void ath9k_wiphy_chan_work(struct work_struct
*work
)
302 struct ath_softc
*sc
= container_of(work
, struct ath_softc
, chan_work
);
303 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
304 struct ath_wiphy
*aphy
= sc
->next_wiphy
;
310 * All pending interfaces paused; ready to change
314 /* Change channels */
315 mutex_lock(&sc
->mutex
);
316 /* XXX: remove me eventually */
317 ath9k_update_ichannel(sc
, aphy
->hw
,
318 &sc
->sc_ah
->channels
[sc
->chan_idx
]);
320 /* sync hw configuration for hw code */
321 common
->hw
= aphy
->hw
;
323 ath_update_chainmask(sc
, sc
->chan_is_ht
);
324 if (ath_set_channel(sc
, aphy
->hw
,
325 &sc
->sc_ah
->channels
[sc
->chan_idx
]) < 0) {
326 printk(KERN_DEBUG
"ath9k: Failed to set channel for new "
328 mutex_unlock(&sc
->mutex
);
331 mutex_unlock(&sc
->mutex
);
333 ath9k_wiphy_unpause_channel(sc
);
337 * ath9k version of ieee80211_tx_status() for TX frames that are generated
338 * internally in the driver.
340 void ath9k_tx_status(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
342 struct ath_wiphy
*aphy
= hw
->priv
;
343 struct ieee80211_tx_info
*tx_info
= IEEE80211_SKB_CB(skb
);
345 if ((tx_info
->pad
[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE
) &&
346 aphy
->state
== ATH_WIPHY_PAUSING
) {
347 if (!(tx_info
->flags
& IEEE80211_TX_STAT_ACK
)) {
348 printk(KERN_DEBUG
"ath9k: %s: no ACK for pause "
349 "frame\n", wiphy_name(hw
->wiphy
));
351 * The AP did not reply; ignore this to allow us to
355 aphy
->state
= ATH_WIPHY_PAUSED
;
356 if (!ath9k_wiphy_pausing(aphy
->sc
)) {
358 * Drop from tasklet to work to allow mutex for channel
361 ieee80211_queue_work(aphy
->sc
->hw
,
362 &aphy
->sc
->chan_work
);
369 static void ath9k_mark_paused(struct ath_wiphy
*aphy
)
371 struct ath_softc
*sc
= aphy
->sc
;
372 aphy
->state
= ATH_WIPHY_PAUSED
;
373 if (!__ath9k_wiphy_pausing(sc
))
374 ieee80211_queue_work(sc
->hw
, &sc
->chan_work
);
377 static void ath9k_pause_iter(void *data
, u8
*mac
, struct ieee80211_vif
*vif
)
379 struct ath_wiphy
*aphy
= data
;
380 struct ath_vif
*avp
= (void *) vif
->drv_priv
;
383 case NL80211_IFTYPE_STATION
:
384 if (!vif
->bss_conf
.assoc
) {
385 ath9k_mark_paused(aphy
);
388 /* TODO: could avoid this if already in PS mode */
389 if (ath9k_send_nullfunc(aphy
, vif
, avp
->bssid
, 1)) {
390 printk(KERN_DEBUG
"%s: failed to send PS nullfunc\n",
392 ath9k_mark_paused(aphy
);
395 case NL80211_IFTYPE_AP
:
396 /* Beacon transmission is paused by aphy->state change */
397 ath9k_mark_paused(aphy
);
404 /* caller must hold wiphy_lock */
405 static int __ath9k_wiphy_pause(struct ath_wiphy
*aphy
)
407 ieee80211_stop_queues(aphy
->hw
);
408 aphy
->state
= ATH_WIPHY_PAUSING
;
410 * TODO: handle PAUSING->PAUSED for the case where there are multiple
411 * active vifs (now we do it on the first vif getting ready; should be
414 ieee80211_iterate_active_interfaces_atomic(aphy
->hw
, ath9k_pause_iter
,
419 int ath9k_wiphy_pause(struct ath_wiphy
*aphy
)
422 spin_lock_bh(&aphy
->sc
->wiphy_lock
);
423 ret
= __ath9k_wiphy_pause(aphy
);
424 spin_unlock_bh(&aphy
->sc
->wiphy_lock
);
428 static void ath9k_unpause_iter(void *data
, u8
*mac
, struct ieee80211_vif
*vif
)
430 struct ath_wiphy
*aphy
= data
;
431 struct ath_vif
*avp
= (void *) vif
->drv_priv
;
434 case NL80211_IFTYPE_STATION
:
435 if (!vif
->bss_conf
.assoc
)
437 ath9k_send_nullfunc(aphy
, vif
, avp
->bssid
, 0);
439 case NL80211_IFTYPE_AP
:
440 /* Beacon transmission is re-enabled by aphy->state change */
447 /* caller must hold wiphy_lock */
448 static int __ath9k_wiphy_unpause(struct ath_wiphy
*aphy
)
450 ieee80211_iterate_active_interfaces_atomic(aphy
->hw
,
451 ath9k_unpause_iter
, aphy
);
452 aphy
->state
= ATH_WIPHY_ACTIVE
;
453 ieee80211_wake_queues(aphy
->hw
);
457 int ath9k_wiphy_unpause(struct ath_wiphy
*aphy
)
460 spin_lock_bh(&aphy
->sc
->wiphy_lock
);
461 ret
= __ath9k_wiphy_unpause(aphy
);
462 spin_unlock_bh(&aphy
->sc
->wiphy_lock
);
466 static void __ath9k_wiphy_mark_all_paused(struct ath_softc
*sc
)
469 if (sc
->pri_wiphy
->state
!= ATH_WIPHY_INACTIVE
)
470 sc
->pri_wiphy
->state
= ATH_WIPHY_PAUSED
;
471 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
472 if (sc
->sec_wiphy
[i
] &&
473 sc
->sec_wiphy
[i
]->state
!= ATH_WIPHY_INACTIVE
)
474 sc
->sec_wiphy
[i
]->state
= ATH_WIPHY_PAUSED
;
478 /* caller must hold wiphy_lock */
479 static void __ath9k_wiphy_pause_all(struct ath_softc
*sc
)
482 if (sc
->pri_wiphy
->state
== ATH_WIPHY_ACTIVE
)
483 __ath9k_wiphy_pause(sc
->pri_wiphy
);
484 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
485 if (sc
->sec_wiphy
[i
] &&
486 sc
->sec_wiphy
[i
]->state
== ATH_WIPHY_ACTIVE
)
487 __ath9k_wiphy_pause(sc
->sec_wiphy
[i
]);
491 int ath9k_wiphy_select(struct ath_wiphy
*aphy
)
493 struct ath_softc
*sc
= aphy
->sc
;
496 spin_lock_bh(&sc
->wiphy_lock
);
497 if (__ath9k_wiphy_scanning(sc
)) {
499 * For now, we are using mac80211 sw scan and it expects to
500 * have full control over channel changes, so avoid wiphy
501 * scheduling during a scan. This could be optimized if the
502 * scanning control were moved into the driver.
504 spin_unlock_bh(&sc
->wiphy_lock
);
507 if (__ath9k_wiphy_pausing(sc
)) {
508 if (sc
->wiphy_select_failures
== 0)
509 sc
->wiphy_select_first_fail
= jiffies
;
510 sc
->wiphy_select_failures
++;
511 if (time_after(jiffies
, sc
->wiphy_select_first_fail
+ HZ
/ 2))
513 printk(KERN_DEBUG
"ath9k: Previous wiphy select timed "
514 "out; disable/enable hw to recover\n");
515 __ath9k_wiphy_mark_all_paused(sc
);
517 * TODO: this workaround to fix hardware is unlikely to
518 * be specific to virtual wiphy changes. It can happen
519 * on normal channel change, too, and as such, this
520 * should really be made more generic. For example,
521 * tricker radio disable/enable on GTT interrupt burst
522 * (say, 10 GTT interrupts received without any TX
523 * frame being completed)
525 spin_unlock_bh(&sc
->wiphy_lock
);
526 ath_radio_disable(sc
, aphy
->hw
);
527 ath_radio_enable(sc
, aphy
->hw
);
528 /* Only the primary wiphy hw is used for queuing work */
529 ieee80211_queue_work(aphy
->sc
->hw
,
530 &aphy
->sc
->chan_work
);
531 return -EBUSY
; /* previous select still in progress */
533 spin_unlock_bh(&sc
->wiphy_lock
);
534 return -EBUSY
; /* previous select still in progress */
536 sc
->wiphy_select_failures
= 0;
538 /* Store the new channel */
539 sc
->chan_idx
= aphy
->chan_idx
;
540 sc
->chan_is_ht
= aphy
->chan_is_ht
;
541 sc
->next_wiphy
= aphy
;
543 __ath9k_wiphy_pause_all(sc
);
544 now
= !__ath9k_wiphy_pausing(aphy
->sc
);
545 spin_unlock_bh(&sc
->wiphy_lock
);
548 /* Ready to request channel change immediately */
549 ieee80211_queue_work(aphy
->sc
->hw
, &aphy
->sc
->chan_work
);
553 * wiphys will be unpaused in ath9k_tx_status() once channel has been
554 * changed if any wiphy needs time to become paused.
560 bool ath9k_wiphy_started(struct ath_softc
*sc
)
563 spin_lock_bh(&sc
->wiphy_lock
);
564 if (sc
->pri_wiphy
->state
!= ATH_WIPHY_INACTIVE
) {
565 spin_unlock_bh(&sc
->wiphy_lock
);
568 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
569 if (sc
->sec_wiphy
[i
] &&
570 sc
->sec_wiphy
[i
]->state
!= ATH_WIPHY_INACTIVE
) {
571 spin_unlock_bh(&sc
->wiphy_lock
);
575 spin_unlock_bh(&sc
->wiphy_lock
);
579 static void ath9k_wiphy_pause_chan(struct ath_wiphy
*aphy
,
580 struct ath_wiphy
*selected
)
582 if (selected
->state
== ATH_WIPHY_SCAN
) {
583 if (aphy
== selected
)
586 * Pause all other wiphys for the duration of the scan even if
587 * they are on the current channel now.
589 } else if (aphy
->chan_idx
== selected
->chan_idx
)
591 aphy
->state
= ATH_WIPHY_PAUSED
;
592 ieee80211_stop_queues(aphy
->hw
);
595 void ath9k_wiphy_pause_all_forced(struct ath_softc
*sc
,
596 struct ath_wiphy
*selected
)
599 spin_lock_bh(&sc
->wiphy_lock
);
600 if (sc
->pri_wiphy
->state
== ATH_WIPHY_ACTIVE
)
601 ath9k_wiphy_pause_chan(sc
->pri_wiphy
, selected
);
602 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
603 if (sc
->sec_wiphy
[i
] &&
604 sc
->sec_wiphy
[i
]->state
== ATH_WIPHY_ACTIVE
)
605 ath9k_wiphy_pause_chan(sc
->sec_wiphy
[i
], selected
);
607 spin_unlock_bh(&sc
->wiphy_lock
);
610 void ath9k_wiphy_work(struct work_struct
*work
)
612 struct ath_softc
*sc
= container_of(work
, struct ath_softc
,
614 struct ath_wiphy
*aphy
= NULL
;
617 spin_lock_bh(&sc
->wiphy_lock
);
619 if (sc
->wiphy_scheduler_int
== 0) {
620 /* wiphy scheduler is disabled */
621 spin_unlock_bh(&sc
->wiphy_lock
);
626 sc
->wiphy_scheduler_index
++;
627 while (sc
->wiphy_scheduler_index
<= sc
->num_sec_wiphy
) {
628 aphy
= sc
->sec_wiphy
[sc
->wiphy_scheduler_index
- 1];
629 if (aphy
&& aphy
->state
!= ATH_WIPHY_INACTIVE
)
632 sc
->wiphy_scheduler_index
++;
636 sc
->wiphy_scheduler_index
= 0;
637 if (sc
->pri_wiphy
->state
== ATH_WIPHY_INACTIVE
) {
642 /* No wiphy is ready to be scheduled */
644 aphy
= sc
->pri_wiphy
;
647 spin_unlock_bh(&sc
->wiphy_lock
);
650 aphy
->state
!= ATH_WIPHY_ACTIVE
&& aphy
->state
!= ATH_WIPHY_SCAN
&&
651 ath9k_wiphy_select(aphy
)) {
652 printk(KERN_DEBUG
"ath9k: Failed to schedule virtual wiphy "
656 ieee80211_queue_delayed_work(sc
->hw
,
658 sc
->wiphy_scheduler_int
);
661 void ath9k_wiphy_set_scheduler(struct ath_softc
*sc
, unsigned int msec_int
)
663 cancel_delayed_work_sync(&sc
->wiphy_work
);
664 sc
->wiphy_scheduler_int
= msecs_to_jiffies(msec_int
);
665 if (sc
->wiphy_scheduler_int
)
666 ieee80211_queue_delayed_work(sc
->hw
, &sc
->wiphy_work
,
667 sc
->wiphy_scheduler_int
);
670 /* caller must hold wiphy_lock */
671 bool ath9k_all_wiphys_idle(struct ath_softc
*sc
)
674 if (!sc
->pri_wiphy
->idle
)
676 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
677 struct ath_wiphy
*aphy
= sc
->sec_wiphy
[i
];
686 /* caller must hold wiphy_lock */
687 void ath9k_set_wiphy_idle(struct ath_wiphy
*aphy
, bool idle
)
689 struct ath_softc
*sc
= aphy
->sc
;
692 ath_print(ath9k_hw_common(sc
->sc_ah
), ATH_DBG_CONFIG
,
693 "Marking %s as %s\n",
694 wiphy_name(aphy
->hw
->wiphy
),
695 idle
? "idle" : "not-idle");
697 /* Only bother starting a queue on an active virtual wiphy */
698 void ath_mac80211_start_queue(struct ath_softc
*sc
, u16 skb_queue
)
700 struct ieee80211_hw
*hw
= sc
->pri_wiphy
->hw
;
703 spin_lock_bh(&sc
->wiphy_lock
);
705 /* Start the primary wiphy */
706 if (sc
->pri_wiphy
->state
== ATH_WIPHY_ACTIVE
) {
707 ieee80211_wake_queue(hw
, skb_queue
);
711 /* Now start the secondary wiphy queues */
712 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
713 struct ath_wiphy
*aphy
= sc
->sec_wiphy
[i
];
716 if (aphy
->state
!= ATH_WIPHY_ACTIVE
)
720 ieee80211_wake_queue(hw
, skb_queue
);
725 spin_unlock_bh(&sc
->wiphy_lock
);
728 /* Go ahead and propagate information to all virtual wiphys, it won't hurt */
729 void ath_mac80211_stop_queue(struct ath_softc
*sc
, u16 skb_queue
)
731 struct ieee80211_hw
*hw
= sc
->pri_wiphy
->hw
;
734 spin_lock_bh(&sc
->wiphy_lock
);
736 /* Stop the primary wiphy */
737 ieee80211_stop_queue(hw
, skb_queue
);
739 /* Now stop the secondary wiphy queues */
740 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
741 struct ath_wiphy
*aphy
= sc
->sec_wiphy
[i
];
745 ieee80211_stop_queue(hw
, skb_queue
);
747 spin_unlock_bh(&sc
->wiphy_lock
);