]>
Commit | Line | Data |
---|---|---|
2b27bdcc | 1 | // SPDX-License-Identifier: GPL-2.0-only |
f5fc0f86 LC |
2 | /* |
3 | * This file is part of wl1271 | |
4 | * | |
5 | * Copyright (C) 2009 Nokia Corporation | |
6 | * | |
7 | * Contact: Luciano Coelho <luciano.coelho@nokia.com> | |
f5fc0f86 LC |
8 | */ |
9 | ||
10 | #include <linux/kernel.h> | |
11 | #include <linux/module.h> | |
c6c8a65d | 12 | #include <linux/etherdevice.h> |
fa2648a3 | 13 | #include <linux/pm_runtime.h> |
8910cfa3 | 14 | #include <linux/spinlock.h> |
f5fc0f86 | 15 | |
c31be25a | 16 | #include "wlcore.h" |
0f4e3122 | 17 | #include "debug.h" |
00d20100 | 18 | #include "io.h" |
00d20100 SL |
19 | #include "ps.h" |
20 | #include "tx.h" | |
56d4f8f6 | 21 | #include "event.h" |
b3b4b4b8 | 22 | #include "hw_ops.h" |
f5fc0f86 | 23 | |
00782136 LC |
24 | /* |
25 | * TODO: this is here just for now, it must be removed when the data | |
26 | * operations are in place. | |
27 | */ | |
28 | #include "../wl12xx/reg.h" | |
29 | ||
536129c8 EP |
30 | static int wl1271_set_default_wep_key(struct wl1271 *wl, |
31 | struct wl12xx_vif *wlvif, u8 id) | |
7f179b46 AN |
32 | { |
33 | int ret; | |
536129c8 | 34 | bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); |
7f179b46 AN |
35 | |
36 | if (is_ap) | |
c690ec81 | 37 | ret = wl12xx_cmd_set_default_wep_key(wl, id, |
a8ab39a4 | 38 | wlvif->ap.bcast_hlid); |
7f179b46 | 39 | else |
154da67c | 40 | ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); |
7f179b46 AN |
41 | |
42 | if (ret < 0) | |
43 | return ret; | |
44 | ||
45 | wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); | |
46 | return 0; | |
47 | } | |
48 | ||
25eeb9e3 | 49 | static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) |
f5fc0f86 | 50 | { |
25eeb9e3 IY |
51 | int id; |
52 | ||
72b0624f AN |
53 | id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc); |
54 | if (id >= wl->num_tx_desc) | |
25eeb9e3 IY |
55 | return -EBUSY; |
56 | ||
57 | __set_bit(id, wl->tx_frames_map); | |
58 | wl->tx_frames[id] = skb; | |
59 | wl->tx_frames_cnt++; | |
60 | return id; | |
61 | } | |
f5fc0f86 | 62 | |
872b345f | 63 | void wl1271_free_tx_id(struct wl1271 *wl, int id) |
25eeb9e3 IY |
64 | { |
65 | if (__test_and_clear_bit(id, wl->tx_frames_map)) { | |
72b0624f | 66 | if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) |
ef2e3004 IY |
67 | clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); |
68 | ||
25eeb9e3 IY |
69 | wl->tx_frames[id] = NULL; |
70 | wl->tx_frames_cnt--; | |
71 | } | |
f5fc0f86 | 72 | } |
872b345f | 73 | EXPORT_SYMBOL(wl1271_free_tx_id); |
f5fc0f86 | 74 | |
99a2775d | 75 | static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, |
187e52cc | 76 | struct wl12xx_vif *wlvif, |
99a2775d AN |
77 | struct sk_buff *skb) |
78 | { | |
79 | struct ieee80211_hdr *hdr; | |
80 | ||
187e52cc AN |
81 | hdr = (struct ieee80211_hdr *)(skb->data + |
82 | sizeof(struct wl1271_tx_hw_descr)); | |
83 | if (!ieee80211_is_auth(hdr->frame_control)) | |
84 | return; | |
85 | ||
99a2775d AN |
86 | /* |
87 | * add the station to the known list before transmitting the | |
88 | * authentication response. this way it won't get de-authed by FW | |
89 | * when transmitting too soon. | |
90 | */ | |
028e7243 | 91 | wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1); |
187e52cc AN |
92 | |
93 | /* | |
94 | * ROC for 1 second on the AP channel for completing the connection. | |
95 | * Note the ROC will be continued by the update_sta_state callbacks | |
96 | * once the station reaches the associated state. | |
97 | */ | |
98 | wlcore_update_inconn_sta(wl, wlvif, NULL, true); | |
99 | wlvif->pending_auth_reply_time = jiffies; | |
100 | cancel_delayed_work(&wlvif->pending_auth_complete_work); | |
101 | ieee80211_queue_delayed_work(wl->hw, | |
102 | &wlvif->pending_auth_complete_work, | |
103 | msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT)); | |
99a2775d AN |
104 | } |
105 | ||
c7ffb902 EP |
106 | static void wl1271_tx_regulate_link(struct wl1271 *wl, |
107 | struct wl12xx_vif *wlvif, | |
108 | u8 hlid) | |
b622d992 | 109 | { |
37c68ea6 | 110 | bool fw_ps; |
9b17f1b3 | 111 | u8 tx_pkts; |
b622d992 | 112 | |
c7ffb902 | 113 | if (WARN_ON(!test_bit(hlid, wlvif->links_map))) |
b622d992 AN |
114 | return; |
115 | ||
5e74b3aa | 116 | fw_ps = test_bit(hlid, &wl->ap_fw_ps_map); |
9b17f1b3 | 117 | tx_pkts = wl->links[hlid].allocated_pkts; |
b622d992 AN |
118 | |
119 | /* | |
120 | * if in FW PS and there is enough data in FW we can put the link | |
121 | * into high-level PS and clean out its TX queues. | |
9a100968 AN |
122 | * Make an exception if this is the only connected link. In this |
123 | * case FW-memory congestion is less of a problem. | |
41ed1a78 EP |
124 | * Note that a single connected STA means 2*ap_count + 1 active links, |
125 | * since we must account for the global and broadcast AP links | |
126 | * for each AP. The "fw_ps" check assures us the other link is a STA | |
127 | * connected to the AP. Otherwise the FW would not set the PSM bit. | |
b622d992 | 128 | */ |
41ed1a78 | 129 | if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps && |
37c68ea6 | 130 | tx_pkts >= WL1271_PS_STA_MAX_PACKETS) |
6e8cd331 | 131 | wl12xx_ps_link_start(wl, wlvif, hlid, true); |
b622d992 AN |
132 | } |
133 | ||
f8e0af6b | 134 | bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) |
f4df1bd5 EP |
135 | { |
136 | return wl->dummy_packet == skb; | |
137 | } | |
872b345f | 138 | EXPORT_SYMBOL(wl12xx_is_dummy_packet); |
f4df1bd5 | 139 | |
2b2b6438 AN |
140 | static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
141 | struct sk_buff *skb, struct ieee80211_sta *sta) | |
a8c0ddb5 | 142 | { |
2b2b6438 | 143 | if (sta) { |
a8c0ddb5 AN |
144 | struct wl1271_station *wl_sta; |
145 | ||
2b2b6438 | 146 | wl_sta = (struct wl1271_station *)sta->drv_priv; |
a8c0ddb5 AN |
147 | return wl_sta->hlid; |
148 | } else { | |
149 | struct ieee80211_hdr *hdr; | |
150 | ||
53d40d0b | 151 | if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) |
f4df1bd5 EP |
152 | return wl->system_hlid; |
153 | ||
a8c0ddb5 | 154 | hdr = (struct ieee80211_hdr *)skb->data; |
45b60f7d | 155 | if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) |
a8ab39a4 | 156 | return wlvif->ap.bcast_hlid; |
45b60f7d EP |
157 | else |
158 | return wlvif->ap.global_hlid; | |
a8c0ddb5 AN |
159 | } |
160 | } | |
161 | ||
d6a3cc2e | 162 | u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
2b2b6438 | 163 | struct sk_buff *skb, struct ieee80211_sta *sta) |
f4df1bd5 | 164 | { |
dabf37db EP |
165 | struct ieee80211_tx_info *control; |
166 | ||
536129c8 | 167 | if (wlvif->bss_type == BSS_TYPE_AP_BSS) |
2b2b6438 | 168 | return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta); |
f4df1bd5 | 169 | |
dabf37db EP |
170 | control = IEEE80211_SKB_CB(skb); |
171 | if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { | |
172 | wl1271_debug(DEBUG_TX, "tx offchannel"); | |
173 | return wlvif->dev_hlid; | |
174 | } | |
175 | ||
3230f35e | 176 | return wlvif->sta.hlid; |
f4df1bd5 EP |
177 | } |
178 | ||
b3b4b4b8 AN |
179 | unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, |
180 | unsigned int packet_length) | |
0da13da7 | 181 | { |
9fccc82e IR |
182 | if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) || |
183 | !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)) | |
f83985bb | 184 | return ALIGN(packet_length, WL1271_TX_ALIGN_TO); |
9fccc82e IR |
185 | else |
186 | return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); | |
0da13da7 | 187 | } |
b3b4b4b8 | 188 | EXPORT_SYMBOL(wlcore_calc_packet_alignment); |
0da13da7 | 189 | |
/*
 * Reserve firmware memory blocks and a descriptor id for @skb.
 *
 * On success the HW descriptor is pushed onto the front of the skb and
 * the driver's block/packet accounting is updated. Returns:
 *   0        - allocation succeeded
 *   -EAGAIN  - aggregation buffer is full; caller should flush and retry
 *   -EBUSY   - not enough firmware blocks (or no free descriptor id)
 *
 * Caller must hold wl->mutex (TX path invariant).
 */
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	/* no room left in the aggregation buffer for this frame */
	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	/* spare blocks are chip-specific; GEM cipher may need more */
	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		/* prepend the HW descriptor to the frame */
		desc = skb_push(skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/*
		 * If the FW was empty before, arm the Tx watchdog. Also do
		 * this on the first Tx after resume, as we always cancel the
		 * watchdog on suspend.
		 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);

		/* per-AC and per-link packet accounting */
		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		/* not enough firmware blocks — give the id back */
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
249 | ||
/*
 * Fill the HW TX descriptor that wl1271_tx_allocate() prepended to @skb:
 * packet lifetime, TID/queue, session counter, rate-policy index and
 * attribute flags. @extra is the security-header space to relocate
 * (e.g. TKIP); @hlid is the target firmware link.
 */
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time (boottime, in ~ms via >>10) */
	hosttime = (ktime_get_boot_ns() >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			  TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		/* some chips require a zero session id in AP mode */
		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;

	desc->tx_attr = cpu_to_le16(tx_attr);

	/* chip-specific: checksum offload config and final length field */
	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
358 | ||
/* caller must hold wl->mutex */
/*
 * Prepare one frame for transmission: allocate firmware blocks and a
 * descriptor, fill the HW header, run AP-mode connection/PS hooks, and
 * copy the (padded) frame into the aggregation buffer at @buf_offset.
 *
 * Returns the aligned length consumed in the aggregation buffer, or a
 * negative error (-EAGAIN: aggregation buffer full; -EBUSY: firmware
 * full; -EINVAL: bad skb/hlid; other negatives from WEP key setup).
 */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	/* TKIP needs extra header space relocated by tx_fill_hdr */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		/* keep the firmware's default WEP key in sync with mac80211 */
		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
438 | ||
af7fbb28 | 439 | u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, |
57fbcce3 | 440 | enum nl80211_band rate_band) |
830fb67b JO |
441 | { |
442 | struct ieee80211_supported_band *band; | |
443 | u32 enabled_rates = 0; | |
444 | int bit; | |
445 | ||
af7fbb28 | 446 | band = wl->hw->wiphy->bands[rate_band]; |
830fb67b JO |
447 | for (bit = 0; bit < band->n_bitrates; bit++) { |
448 | if (rate_set & 0x1) | |
449 | enabled_rates |= band->bitrates[bit].hw_value; | |
450 | rate_set >>= 1; | |
451 | } | |
452 | ||
b3a47ee0 | 453 | /* MCS rates indication are on bits 16 - 31 */ |
18357850 SL |
454 | rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; |
455 | ||
b3a47ee0 | 456 | for (bit = 0; bit < 16; bit++) { |
18357850 SL |
457 | if (rate_set & 0x1) |
458 | enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); | |
459 | rate_set >>= 1; | |
460 | } | |
18357850 | 461 | |
830fb67b JO |
462 | return enabled_rates; |
463 | } | |
464 | ||
a8c0ddb5 | 465 | void wl1271_handle_tx_low_watermark(struct wl1271 *wl) |
2fe33e8c | 466 | { |
708bb3cf | 467 | int i; |
1c33db78 | 468 | struct wl12xx_vif *wlvif; |
2fe33e8c | 469 | |
1c33db78 AN |
470 | wl12xx_for_each_wlvif(wl, wlvif) { |
471 | for (i = 0; i < NUM_TX_QUEUES; i++) { | |
472 | if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i, | |
473 | WLCORE_QUEUE_STOP_REASON_WATERMARK) && | |
474 | wlvif->tx_queue_count[i] <= | |
475 | WL1271_TX_QUEUE_LOW_WATERMARK) | |
476 | /* firmware buffer has space, restart queues */ | |
477 | wlcore_wake_queue(wl, wlvif, i, | |
478 | WLCORE_QUEUE_STOP_REASON_WATERMARK); | |
708bb3cf | 479 | } |
2fe33e8c IY |
480 | } |
481 | } | |
482 | ||
0e810479 | 483 | static int wlcore_select_ac(struct wl1271 *wl) |
742246f8 AN |
484 | { |
485 | int i, q = -1, ac; | |
486 | u32 min_pkts = 0xffffffff; | |
487 | ||
488 | /* | |
489 | * Find a non-empty ac where: | |
490 | * 1. There are packets to transmit | |
491 | * 2. The FW has the least allocated blocks | |
492 | * | |
493 | * We prioritize the ACs according to VO>VI>BE>BK | |
494 | */ | |
495 | for (i = 0; i < NUM_TX_QUEUES; i++) { | |
496 | ac = wl1271_tx_get_queue(i); | |
0e810479 AN |
497 | if (wl->tx_queue_count[ac] && |
498 | wl->tx_allocated_pkts[ac] < min_pkts) { | |
742246f8 AN |
499 | q = ac; |
500 | min_pkts = wl->tx_allocated_pkts[q]; | |
501 | } | |
502 | } | |
503 | ||
0e810479 | 504 | return q; |
742246f8 AN |
505 | } |
506 | ||
0e810479 AN |
507 | static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl, |
508 | struct wl1271_link *lnk, u8 q) | |
6742f554 | 509 | { |
d6a3cc2e | 510 | struct sk_buff *skb; |
6742f554 JO |
511 | unsigned long flags; |
512 | ||
0e810479 | 513 | skb = skb_dequeue(&lnk->tx_queue[q]); |
6742f554 JO |
514 | if (skb) { |
515 | spin_lock_irqsave(&wl->wl_lock, flags); | |
6246ca00 | 516 | WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); |
f1a46384 | 517 | wl->tx_queue_count[q]--; |
8591d424 AN |
518 | if (lnk->wlvif) { |
519 | WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0); | |
520 | lnk->wlvif->tx_queue_count[q]--; | |
521 | } | |
6742f554 JO |
522 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
523 | } | |
524 | ||
525 | return skb; | |
526 | } | |
527 | ||
0e810479 AN |
528 | static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl, |
529 | u8 hlid, u8 ac, | |
530 | u8 *low_prio_hlid) | |
531 | { | |
532 | struct wl1271_link *lnk = &wl->links[hlid]; | |
533 | ||
f1626fd8 | 534 | if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) { |
0e810479 | 535 | if (*low_prio_hlid == WL12XX_INVALID_LINK_ID && |
f1626fd8 AN |
536 | !skb_queue_empty(&lnk->tx_queue[ac]) && |
537 | wlcore_hw_lnk_low_prio(wl, hlid, lnk)) | |
0e810479 AN |
538 | /* we found the first non-empty low priority queue */ |
539 | *low_prio_hlid = hlid; | |
540 | ||
541 | return NULL; | |
542 | } | |
543 | ||
544 | return wlcore_lnk_dequeue(wl, lnk, ac); | |
545 | } | |
546 | ||
547 | static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl, | |
548 | struct wl12xx_vif *wlvif, | |
549 | u8 ac, u8 *hlid, | |
550 | u8 *low_prio_hlid) | |
a8c0ddb5 AN |
551 | { |
552 | struct sk_buff *skb = NULL; | |
a8c0ddb5 AN |
553 | int i, h, start_hlid; |
554 | ||
555 | /* start from the link after the last one */ | |
da08fdfa | 556 | start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links; |
a8c0ddb5 AN |
557 | |
558 | /* dequeue according to AC, round robin on each link */ | |
da08fdfa EP |
559 | for (i = 0; i < wl->num_links; i++) { |
560 | h = (start_hlid + i) % wl->num_links; | |
a8c0ddb5 | 561 | |
742246f8 | 562 | /* only consider connected stations */ |
c7ffb902 | 563 | if (!test_bit(h, wlvif->links_map)) |
742246f8 AN |
564 | continue; |
565 | ||
0e810479 AN |
566 | skb = wlcore_lnk_dequeue_high_prio(wl, h, ac, |
567 | low_prio_hlid); | |
d6a3cc2e | 568 | if (!skb) |
742246f8 AN |
569 | continue; |
570 | ||
d6a3cc2e EP |
571 | wlvif->last_tx_hlid = h; |
572 | break; | |
a8c0ddb5 AN |
573 | } |
574 | ||
d6a3cc2e | 575 | if (!skb) |
4438aca9 | 576 | wlvif->last_tx_hlid = 0; |
a8c0ddb5 | 577 | |
930e1915 | 578 | *hlid = wlvif->last_tx_hlid; |
a8c0ddb5 AN |
579 | return skb; |
580 | } | |
581 | ||
/*
 * Select the next skb to transmit, fairly across vifs and links.
 *
 * Pass order: (1) continue the vif round-robin from last_wlvif,
 * (2) the system link, (3) a fresh pass over the whole vif list up to
 * last_wlvif, (4) the best low-priority link found along the way, and
 * finally (5) the pending dummy packet if the FW requested one.
 * On success *hlid is set to the source link. Returns NULL when there
 * is nothing to send.
 */
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before the restarting wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;

	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
671 | ||
d6a3cc2e | 672 | static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
930e1915 | 673 | struct sk_buff *skb, u8 hlid) |
6742f554 JO |
674 | { |
675 | unsigned long flags; | |
676 | int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | |
677 | ||
990f5de7 IY |
678 | if (wl12xx_is_dummy_packet(wl, skb)) { |
679 | set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); | |
d6a3cc2e | 680 | } else { |
a8c0ddb5 AN |
681 | skb_queue_head(&wl->links[hlid].tx_queue[q], skb); |
682 | ||
683 | /* make sure we dequeue the same packet next time */ | |
da08fdfa EP |
684 | wlvif->last_tx_hlid = (hlid + wl->num_links - 1) % |
685 | wl->num_links; | |
a8c0ddb5 AN |
686 | } |
687 | ||
6742f554 | 688 | spin_lock_irqsave(&wl->wl_lock, flags); |
f1a46384 | 689 | wl->tx_queue_count[q]++; |
8591d424 AN |
690 | if (wlvif) |
691 | wlvif->tx_queue_count[q]++; | |
6742f554 JO |
692 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
693 | } | |
694 | ||
77ddaa10 EP |
695 | static bool wl1271_tx_is_data_present(struct sk_buff *skb) |
696 | { | |
697 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); | |
698 | ||
699 | return ieee80211_is_data_present(hdr->frame_control); | |
700 | } | |
701 | ||
9eb599e9 EP |
702 | void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) |
703 | { | |
704 | struct wl12xx_vif *wlvif; | |
705 | u32 timeout; | |
706 | u8 hlid; | |
707 | ||
708 | if (!wl->conf.rx_streaming.interval) | |
709 | return; | |
710 | ||
711 | if (!wl->conf.rx_streaming.always && | |
712 | !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) | |
713 | return; | |
714 | ||
715 | timeout = wl->conf.rx_streaming.duration; | |
716 | wl12xx_for_each_wlvif_sta(wl, wlvif) { | |
717 | bool found = false; | |
da08fdfa | 718 | for_each_set_bit(hlid, active_hlids, wl->num_links) { |
9eb599e9 EP |
719 | if (test_bit(hlid, wlvif->links_map)) { |
720 | found = true; | |
721 | break; | |
722 | } | |
723 | } | |
724 | ||
725 | if (!found) | |
726 | continue; | |
727 | ||
728 | /* enable rx streaming */ | |
0744bdb6 | 729 | if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) |
9eb599e9 EP |
730 | ieee80211_queue_work(wl->hw, |
731 | &wlvif->rx_streaming_enable_work); | |
732 | ||
733 | mod_timer(&wlvif->rx_streaming_timer, | |
734 | jiffies + msecs_to_jiffies(timeout)); | |
735 | } | |
736 | } | |
737 | ||
7a50bdfb ES |
/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery
 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
 * within prepare_tx_frame code but there's nothing we should do about those
 * as well.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	/* drain the queues into the aggregation buffer, then flush it */
	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
						    wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	/* flush whatever accumulated in the aggregation buffer */
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					    buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
						 wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}
f5fc0f86 | 851 | |
/*
 * Deferred Tx work item: grab a runtime-PM reference, run the locked Tx
 * path, and schedule full recovery if the bus transfer failed.
 */
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/*
		 * pm_runtime_get_sync() raises the usage count even on
		 * failure, so drop it without touching the idle state.
		 */
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		/*
		 * Bus error while pushing frames - trigger recovery.
		 * NOTE(review): the PM reference is intentionally not
		 * released here; presumably recovery rebalances it - confirm.
		 */
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
875 | ||
d2e2d769 PF |
876 | static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) |
877 | { | |
defe02c7 PF |
878 | u8 flags = 0; |
879 | ||
43a8bc5a AN |
880 | /* |
881 | * TODO: use wl12xx constants when this code is moved to wl12xx, as | |
882 | * only it uses Tx-completion. | |
883 | */ | |
884 | if (rate_class_index <= 8) | |
defe02c7 | 885 | flags |= IEEE80211_TX_RC_MCS; |
43a8bc5a AN |
886 | |
887 | /* | |
888 | * TODO: use wl12xx constants when this code is moved to wl12xx, as | |
889 | * only it uses Tx-completion. | |
890 | */ | |
891 | if (rate_class_index == 0) | |
defe02c7 | 892 | flags |= IEEE80211_TX_RC_SHORT_GI; |
43a8bc5a | 893 | |
defe02c7 | 894 | return flags; |
d2e2d769 PF |
895 | } |
896 | ||
/*
 * Process a single Tx-result descriptor from the firmware: fill in the
 * mac80211 tx status, strip driver-private headers and queue the skb for
 * return to the stack.
 */
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets are driver-internal; just release the descriptor */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	/* rate stays -1 (unknown) for any other failure status */
	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		/* shift the 802.11 header over the extra TKIP space */
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack via the deferred-work path */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
970 | ||
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	/* number of new results since the last ones we consumed */
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		/* the result queue is a power-of-two ring; mask wraps it */
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);
f5fc0f86 | 1017 | |
/*
 * Drop every skb queued on the given link, report each non-dummy frame to
 * mac80211 as not acked, and fix up the per-AC queue counters under the
 * spinlock afterwards.
 */
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				/* idx == -1 marks the rate as invalid */
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}
1053 | ||
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure: tear down every link this vif owns */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			/* local copy: wl12xx_free_link may clear the hlid */
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	/* release every descriptor already handed to the firmware */
	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			/* report the frame as not acked */
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
1130 | ||
1131 | #define WL1271_TX_FLUSH_TIMEOUT 500000 | |
1132 | ||
1133 | /* caller must *NOT* hold wl->mutex */ | |
1134 | void wl1271_tx_flush(struct wl1271 *wl) | |
1135 | { | |
958e303a | 1136 | unsigned long timeout, start_time; |
18aa755b | 1137 | int i; |
958e303a AN |
1138 | start_time = jiffies; |
1139 | timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); | |
781608c4 | 1140 | |
2c38849f AN |
1141 | /* only one flush should be in progress, for consistent queue state */ |
1142 | mutex_lock(&wl->flush_mutex); | |
1143 | ||
f83e5413 AN |
1144 | mutex_lock(&wl->mutex); |
1145 | if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) { | |
1146 | mutex_unlock(&wl->mutex); | |
1147 | goto out; | |
1148 | } | |
1149 | ||
2c38849f AN |
1150 | wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); |
1151 | ||
781608c4 | 1152 | while (!time_after(jiffies, timeout)) { |
958e303a | 1153 | wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d", |
f1a46384 AN |
1154 | wl->tx_frames_cnt, |
1155 | wl1271_tx_total_queue_count(wl)); | |
f83e5413 AN |
1156 | |
1157 | /* force Tx and give the driver some time to flush data */ | |
1158 | mutex_unlock(&wl->mutex); | |
1159 | if (wl1271_tx_total_queue_count(wl)) | |
1160 | wl1271_tx_work(&wl->tx_work); | |
1161 | msleep(20); | |
1162 | mutex_lock(&wl->mutex); | |
1163 | ||
f1a46384 AN |
1164 | if ((wl->tx_frames_cnt == 0) && |
1165 | (wl1271_tx_total_queue_count(wl) == 0)) { | |
958e303a AN |
1166 | wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms", |
1167 | jiffies_to_msecs(jiffies - start_time)); | |
f83e5413 | 1168 | goto out_wake; |
781608c4 | 1169 | } |
781608c4 JO |
1170 | } |
1171 | ||
958e303a AN |
1172 | wl1271_warning("Unable to flush all TX buffers, " |
1173 | "timed out (timeout %d ms", | |
1174 | WL1271_TX_FLUSH_TIMEOUT / 1000); | |
18aa755b AN |
1175 | |
1176 | /* forcibly flush all Tx buffers on our queues */ | |
da08fdfa | 1177 | for (i = 0; i < wl->num_links; i++) |
18aa755b | 1178 | wl1271_tx_reset_link_queues(wl, i); |
2c38849f | 1179 | |
f83e5413 | 1180 | out_wake: |
2c38849f | 1181 | wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); |
f83e5413 AN |
1182 | mutex_unlock(&wl->mutex); |
1183 | out: | |
2c38849f | 1184 | mutex_unlock(&wl->flush_mutex); |
f5fc0f86 | 1185 | } |
a1c597f2 | 1186 | EXPORT_SYMBOL_GPL(wl1271_tx_flush); |
e0fe371b | 1187 | |
af7fbb28 | 1188 | u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) |
e0fe371b | 1189 | { |
af7fbb28 EP |
1190 | if (WARN_ON(!rate_set)) |
1191 | return 0; | |
e0fe371b | 1192 | |
af7fbb28 | 1193 | return BIT(__ffs(rate_set)); |
e0fe371b | 1194 | } |
78e28062 | 1195 | EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get); |
66396114 | 1196 | |
1c33db78 AN |
1197 | void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
1198 | u8 queue, enum wlcore_queue_stop_reason reason) | |
66396114 | 1199 | { |
1c33db78 AN |
1200 | int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); |
1201 | bool stopped = !!wl->queue_stop_reasons[hwq]; | |
66396114 AN |
1202 | |
1203 | /* queue should not be stopped for this reason */ | |
1c33db78 | 1204 | WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq])); |
66396114 AN |
1205 | |
1206 | if (stopped) | |
1207 | return; | |
1208 | ||
1c33db78 | 1209 | ieee80211_stop_queue(wl->hw, hwq); |
66396114 AN |
1210 | } |
1211 | ||
/* Like wlcore_stop_queue_locked(), but takes wl->wl_lock itself. */
void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1221 | ||
1c33db78 | 1222 | void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, |
66396114 AN |
1223 | enum wlcore_queue_stop_reason reason) |
1224 | { | |
1225 | unsigned long flags; | |
1c33db78 | 1226 | int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); |
66396114 AN |
1227 | |
1228 | spin_lock_irqsave(&wl->wl_lock, flags); | |
1229 | ||
1230 | /* queue should not be clear for this reason */ | |
1c33db78 | 1231 | WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq])); |
66396114 | 1232 | |
1c33db78 | 1233 | if (wl->queue_stop_reasons[hwq]) |
66396114 AN |
1234 | goto out; |
1235 | ||
1c33db78 | 1236 | ieee80211_wake_queue(wl->hw, hwq); |
66396114 AN |
1237 | |
1238 | out: | |
1239 | spin_unlock_irqrestore(&wl->wl_lock, flags); | |
1240 | } | |
1241 | ||
/* Stop every hw queue (all vifs, all ACs) for the given reason. */
void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know are stopped.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1262 | ||
/* Wake every hw queue previously stopped for the given reason. */
void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know are woken up.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1283 | ||
/* Locking wrapper around the _locked variant below. */
bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
							   reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return stopped;
}
1298 | ||
/* True if the vif's AC queue is stopped for @reason; caller holds wl_lock. */
bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif, u8 queue,
					      enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
1308 | ||
/* True if the vif's AC queue is stopped for any reason; caller holds wl_lock. */
bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}