]>
Commit | Line | Data |
---|---|---|
f5fc0f86 LC |
1 | /* |
2 | * This file is part of wl1271 | |
3 | * | |
4 | * Copyright (C) 2009 Nokia Corporation | |
5 | * | |
6 | * Contact: Luciano Coelho <luciano.coelho@nokia.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License | |
10 | * version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but | |
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
18 | * along with this program; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | |
20 | * 02110-1301 USA | |
21 | * | |
22 | */ | |
23 | ||
24 | #include <linux/kernel.h> | |
25 | #include <linux/module.h> | |
c6c8a65d | 26 | #include <linux/etherdevice.h> |
8910cfa3 | 27 | #include <linux/spinlock.h> |
f5fc0f86 | 28 | |
c31be25a | 29 | #include "wlcore.h" |
0f4e3122 | 30 | #include "debug.h" |
00d20100 | 31 | #include "io.h" |
00d20100 SL |
32 | #include "ps.h" |
33 | #include "tx.h" | |
56d4f8f6 | 34 | #include "event.h" |
b3b4b4b8 | 35 | #include "hw_ops.h" |
f5fc0f86 | 36 | |
00782136 LC |
37 | /* |
38 | * TODO: this is here just for now, it must be removed when the data | |
39 | * operations are in place. | |
40 | */ | |
41 | #include "../wl12xx/reg.h" | |
42 | ||
536129c8 EP |
43 | static int wl1271_set_default_wep_key(struct wl1271 *wl, |
44 | struct wl12xx_vif *wlvif, u8 id) | |
7f179b46 AN |
45 | { |
46 | int ret; | |
536129c8 | 47 | bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); |
7f179b46 AN |
48 | |
49 | if (is_ap) | |
c690ec81 | 50 | ret = wl12xx_cmd_set_default_wep_key(wl, id, |
a8ab39a4 | 51 | wlvif->ap.bcast_hlid); |
7f179b46 | 52 | else |
154da67c | 53 | ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); |
7f179b46 AN |
54 | |
55 | if (ret < 0) | |
56 | return ret; | |
57 | ||
58 | wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); | |
59 | return 0; | |
60 | } | |
61 | ||
/*
 * Allocate a free TX descriptor id for @skb and record the skb in
 * wl->tx_frames[] so it can be matched on TX completion.
 *
 * Returns the allocated id, or -EBUSY when all num_tx_desc ids are
 * in use.
 */
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	/* non-atomic bit op: caller is expected to serialize TX path */
	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}
f5fc0f86 | 75 | |
/*
 * Release TX descriptor id @id, previously obtained from
 * wl1271_alloc_tx_id(). No-op if the id is not currently allocated.
 *
 * If the descriptor pool was completely full before this free, the
 * FW_TX_BUSY flag is cleared so the TX path can resume.
 */
void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		/* pool was full until now - unblock TX */
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);
f5fc0f86 | 87 | |
/*
 * AP mode: when transmitting an authentication frame, pre-register the
 * peer station with the FW and start a temporary ROC so the connection
 * setup can complete.
 *
 * Called on every TX; returns immediately for non-auth frames.
 */
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/* the 802.11 header follows the HW TX descriptor in the skb */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);

	/*
	 * ROC for 1 second on the AP channel for completing the connection.
	 * Note the ROC will be continued by the update_sta_state callbacks
	 * once the station reaches the associated state.
	 */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	/* restart the timeout for the pending-auth ROC */
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
118 | ||
/*
 * Decide whether a link that is in FW power-save should be moved to
 * high-level (host) PS, which also cleans out its TX queues, to relieve
 * FW memory congestion.
 */
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
146 | ||
f8e0af6b | 147 | bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) |
f4df1bd5 EP |
148 | { |
149 | return wl->dummy_packet == skb; | |
150 | } | |
872b345f | 151 | EXPORT_SYMBOL(wl12xx_is_dummy_packet); |
f4df1bd5 | 152 | |
/*
 * AP mode: map an outgoing frame to a host link id (HLID).
 *
 * A known station gets its per-station HLID; otherwise multicast frames
 * go to the broadcast link and unicast frames to the global link. Before
 * the AP is started, everything goes through the system HLID.
 */
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}
174 | ||
/*
 * Map an outgoing frame to a host link id (HLID) for any vif type:
 * AP vifs delegate to wl12xx_tx_get_hlid_ap(); off-channel frames use
 * the device link; everything else uses the STA link.
 */
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}
191 | ||
b3b4b4b8 AN |
192 | unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, |
193 | unsigned int packet_length) | |
0da13da7 | 194 | { |
9fccc82e IR |
195 | if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) || |
196 | !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)) | |
f83985bb | 197 | return ALIGN(packet_length, WL1271_TX_ALIGN_TO); |
9fccc82e IR |
198 | else |
199 | return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); | |
0da13da7 | 200 | } |
b3b4b4b8 | 201 | EXPORT_SYMBOL(wlcore_calc_packet_alignment); |
0da13da7 | 202 | |
/*
 * Reserve FW memory blocks and a descriptor id for @skb and push the
 * HW TX descriptor onto the front of the skb.
 *
 * Returns 0 on success, -EAGAIN when the aggregation buffer is full
 * (caller should flush and retry), -EBUSY when no descriptor id or not
 * enough FW blocks are available.
 */
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	/* no room left in the aggregation buffer for this packet */
	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		/* prepend the HW descriptor (and security padding) */
		desc = skb_push(skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/*
		 * If the FW was empty before, arm the Tx watchdog. Also do
		 * this on the first Tx after resume, as we always cancel the
		 * watchdog on suspend.
		 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);

		/* per-AC and per-link accounting used by the scheduler */
		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		/* not enough FW blocks - release the descriptor id */
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
262 | ||
/*
 * Fill in the HW TX descriptor at the head of @skb: start time, life
 * time, TID/queue, HLID, session counter, rate-policy index and TX
 * attributes. Also relocates the 802.11 header when @extra bytes of
 * security header space were requested.
 */
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time (host time in ~us units, >> 10) */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		/* some chips require a zero session id in AP mode */
		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
373 | ||
/* caller must hold wl->mutex */
/*
 * Prepare one frame for transmission: allocate FW blocks and a
 * descriptor, fill the HW header, and copy the (padded) frame into the
 * aggregation buffer at @buf_offset.
 *
 * Returns the aligned total length consumed in the aggregation buffer,
 * or a negative error (-EAGAIN: aggr buffer full, -EBUSY: FW full,
 * -EINVAL: bad skb/hlid, or a FW command error).
 */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	/* TKIP may need extra header space on some chips */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		/* keep the FW's default WEP key in sync with mac80211 */
		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
453 | ||
/*
 * Translate a mac80211 rate-set bitmap for @rate_band into the FW's
 * CONF_HW_BIT_RATE_* mask. Legacy rates occupy the low bits (mapped via
 * each bitrate's hw_value); MCS rates start at HW_HT_RATES_OFFSET.
 */
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum nl80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rates indication are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}
479 | ||
/*
 * Wake any mac80211 queue that was stopped for the watermark reason and
 * whose backlog has drained to or below the low watermark, for every vif.
 */
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}
497 | ||
0e810479 | 498 | static int wlcore_select_ac(struct wl1271 *wl) |
742246f8 AN |
499 | { |
500 | int i, q = -1, ac; | |
501 | u32 min_pkts = 0xffffffff; | |
502 | ||
503 | /* | |
504 | * Find a non-empty ac where: | |
505 | * 1. There are packets to transmit | |
506 | * 2. The FW has the least allocated blocks | |
507 | * | |
508 | * We prioritize the ACs according to VO>VI>BE>BK | |
509 | */ | |
510 | for (i = 0; i < NUM_TX_QUEUES; i++) { | |
511 | ac = wl1271_tx_get_queue(i); | |
0e810479 AN |
512 | if (wl->tx_queue_count[ac] && |
513 | wl->tx_allocated_pkts[ac] < min_pkts) { | |
742246f8 AN |
514 | q = ac; |
515 | min_pkts = wl->tx_allocated_pkts[q]; | |
516 | } | |
517 | } | |
518 | ||
0e810479 | 519 | return q; |
742246f8 AN |
520 | } |
521 | ||
/*
 * Dequeue one skb from link @lnk on AC @q and decrement the global and
 * per-vif queue counters under wl->wl_lock. Returns NULL if the queue
 * is empty.
 */
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		/* link may not be associated with a vif (e.g. system link) */
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
542 | ||
/*
 * Dequeue from link @hlid only if the HW considers it high priority.
 * Otherwise return NULL, and remember the first non-empty low-priority
 * link in *@low_prio_hlid so the caller can fall back to it later.
 */
static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}
561 | ||
562 | static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl, | |
563 | struct wl12xx_vif *wlvif, | |
564 | u8 ac, u8 *hlid, | |
565 | u8 *low_prio_hlid) | |
a8c0ddb5 AN |
566 | { |
567 | struct sk_buff *skb = NULL; | |
a8c0ddb5 AN |
568 | int i, h, start_hlid; |
569 | ||
570 | /* start from the link after the last one */ | |
da08fdfa | 571 | start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links; |
a8c0ddb5 AN |
572 | |
573 | /* dequeue according to AC, round robin on each link */ | |
da08fdfa EP |
574 | for (i = 0; i < wl->num_links; i++) { |
575 | h = (start_hlid + i) % wl->num_links; | |
a8c0ddb5 | 576 | |
742246f8 | 577 | /* only consider connected stations */ |
c7ffb902 | 578 | if (!test_bit(h, wlvif->links_map)) |
742246f8 AN |
579 | continue; |
580 | ||
0e810479 AN |
581 | skb = wlcore_lnk_dequeue_high_prio(wl, h, ac, |
582 | low_prio_hlid); | |
d6a3cc2e | 583 | if (!skb) |
742246f8 AN |
584 | continue; |
585 | ||
d6a3cc2e EP |
586 | wlvif->last_tx_hlid = h; |
587 | break; | |
a8c0ddb5 AN |
588 | } |
589 | ||
d6a3cc2e | 590 | if (!skb) |
4438aca9 | 591 | wlvif->last_tx_hlid = 0; |
a8c0ddb5 | 592 | |
930e1915 | 593 | *hlid = wlvif->last_tx_hlid; |
a8c0ddb5 AN |
594 | return skb; |
595 | } | |
596 | ||
/*
 * Top-level TX scheduler: pick the next skb to transmit and report its
 * link in *@hlid.
 *
 * Order of preference on the chosen AC:
 *   1. high-priority links, continuing round-robin from last_wlvif;
 *   2. the system link;
 *   3. a second pass over all vifs (up to last_wlvif);
 *   4. the first low-priority link noted during the scans;
 *   5. the pending dummy packet, if the FW requested one.
 * Returns NULL when nothing is pending.
 */
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before the restarting wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;

	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
686 | ||
/*
 * Re-queue @skb at the head of its link's queue (e.g. after a failed or
 * deferred transmit) and restore the queue counters. The dummy packet is
 * handled by re-setting its pending flag instead. last_tx_hlid is wound
 * back so the same packet is dequeued first next time.
 */
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
				      wl->num_links;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
709 | ||
77ddaa10 EP |
710 | static bool wl1271_tx_is_data_present(struct sk_buff *skb) |
711 | { | |
712 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); | |
713 | ||
714 | return ieee80211_is_data_present(hdr->frame_control); | |
715 | } | |
716 | ||
/*
 * (Re)arm RX streaming for every STA vif that owns one of the links in
 * @active_hlids: start the streaming enable work if not yet started and
 * push the disable timer out by the configured duration. No-op when RX
 * streaming is disabled, or when it is conditional on soft-gemini and
 * soft-gemini is not active.
 */
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		/* does this vif own any of the active links? */
		for_each_set_bit(hlid, active_hlids, wl->num_links) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}
752 | ||
7a50bdfb ES |
753 | /* |
754 | * Returns failure values only in case of failed bus ops within this function. | |
755 | * wl1271_prepare_tx_frame retvals won't be returned in order to avoid | |
756 | * triggering recovery by higher layers when not necessary. | |
757 | * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery | |
758 | * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame | |
759 | * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING | |
760 | * within prepare_tx_frame code but there's nothing we should do about those | |
761 | * as well. | |
762 | */ | |
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	/* last_len tracks the previous packet's length so the aggregation
	 * buffer can be finalized by wlcore_hw_pre_pkt_send() */
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	/* nothing to do if the chip is not fully up */
	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			/* dummy packets always go out on the system link */
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		/* copy the frame (plus HW descriptor) into the aggregation
		 * buffer at buf_offset; returns the consumed length */
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
						    wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			/* the HW descriptor was prepended by prepare_tx_frame;
			 * record the link as active for rx streaming rearm */
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	/* flush whatever is still pending in the aggregation buffer */
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					    buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
						 wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	/* only bus errors propagate; -EAGAIN/-EBUSY/-EINVAL are absorbed
	 * above, per the contract in the comment preceding this function */
	return bus_ret;
}
f5fc0f86 | 866 | |
a522550a IY |
867 | void wl1271_tx_work(struct work_struct *work) |
868 | { | |
869 | struct wl1271 *wl = container_of(work, struct wl1271, tx_work); | |
c1b193eb | 870 | int ret; |
a522550a IY |
871 | |
872 | mutex_lock(&wl->mutex); | |
c1b193eb EP |
873 | ret = wl1271_ps_elp_wakeup(wl); |
874 | if (ret < 0) | |
875 | goto out; | |
876 | ||
eb96f841 IY |
877 | ret = wlcore_tx_work_locked(wl); |
878 | if (ret < 0) { | |
879 | wl12xx_queue_recovery_work(wl); | |
880 | goto out; | |
881 | } | |
c1b193eb | 882 | |
c75bbcdb | 883 | wl1271_ps_elp_sleep(wl); |
c1b193eb | 884 | out: |
f5fc0f86 LC |
885 | mutex_unlock(&wl->mutex); |
886 | } | |
887 | ||
d2e2d769 PF |
888 | static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) |
889 | { | |
defe02c7 PF |
890 | u8 flags = 0; |
891 | ||
43a8bc5a AN |
892 | /* |
893 | * TODO: use wl12xx constants when this code is moved to wl12xx, as | |
894 | * only it uses Tx-completion. | |
895 | */ | |
896 | if (rate_class_index <= 8) | |
defe02c7 | 897 | flags |= IEEE80211_TX_RC_MCS; |
43a8bc5a AN |
898 | |
899 | /* | |
900 | * TODO: use wl12xx constants when this code is moved to wl12xx, as | |
901 | * only it uses Tx-completion. | |
902 | */ | |
903 | if (rate_class_index == 0) | |
defe02c7 | 904 | flags |= IEEE80211_TX_RC_SHORT_GI; |
43a8bc5a | 905 | |
defe02c7 | 906 | return flags; |
d2e2d769 PF |
907 | } |
908 | ||
f5fc0f86 LC |
/*
 * Process one firmware Tx-result descriptor: fill in the mac80211 status
 * (ack, rate, retries), strip the driver-private headers, and hand the skb
 * back to the stack via the deferred queue.
 */
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets never came from mac80211; just recycle the id */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	/* no signal strength info available from the fw result */
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		/* shift the 802.11 header forward over the extra space */
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
982 | ||
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	/* number of new results = fw counter minus what we processed so far
	 * (unsigned subtraction handles counter wrap-around) */
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		/* ring index into the fixed-size result queue */
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);
f5fc0f86 | 1029 | |
a8c0ddb5 AN |
/*
 * Drop every queued skb on all Tx queues of the given link (hlid),
 * reporting each non-dummy frame back to mac80211 as not-acked, then
 * fix up the per-device and per-vif queue counters under wl_lock.
 */
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	/* per-queue count of freed skbs, applied to the counters below */
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				/* rate idx -1 / count 0 marks "not sent" */
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}
1065 | ||
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		/* in AP mode, station links are torn down via the sta path;
		 * the broadcast/global links are freed directly */
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	/* release frames already handed to the firmware but not completed */
	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			/* report as not acked */
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
1143 | #define WL1271_TX_FLUSH_TIMEOUT 500000 | |
1144 | ||
1145 | /* caller must *NOT* hold wl->mutex */ | |
1146 | void wl1271_tx_flush(struct wl1271 *wl) | |
1147 | { | |
958e303a | 1148 | unsigned long timeout, start_time; |
18aa755b | 1149 | int i; |
958e303a AN |
1150 | start_time = jiffies; |
1151 | timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); | |
781608c4 | 1152 | |
2c38849f AN |
1153 | /* only one flush should be in progress, for consistent queue state */ |
1154 | mutex_lock(&wl->flush_mutex); | |
1155 | ||
f83e5413 AN |
1156 | mutex_lock(&wl->mutex); |
1157 | if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) { | |
1158 | mutex_unlock(&wl->mutex); | |
1159 | goto out; | |
1160 | } | |
1161 | ||
2c38849f AN |
1162 | wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); |
1163 | ||
781608c4 | 1164 | while (!time_after(jiffies, timeout)) { |
958e303a | 1165 | wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d", |
f1a46384 AN |
1166 | wl->tx_frames_cnt, |
1167 | wl1271_tx_total_queue_count(wl)); | |
f83e5413 AN |
1168 | |
1169 | /* force Tx and give the driver some time to flush data */ | |
1170 | mutex_unlock(&wl->mutex); | |
1171 | if (wl1271_tx_total_queue_count(wl)) | |
1172 | wl1271_tx_work(&wl->tx_work); | |
1173 | msleep(20); | |
1174 | mutex_lock(&wl->mutex); | |
1175 | ||
f1a46384 AN |
1176 | if ((wl->tx_frames_cnt == 0) && |
1177 | (wl1271_tx_total_queue_count(wl) == 0)) { | |
958e303a AN |
1178 | wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms", |
1179 | jiffies_to_msecs(jiffies - start_time)); | |
f83e5413 | 1180 | goto out_wake; |
781608c4 | 1181 | } |
781608c4 JO |
1182 | } |
1183 | ||
958e303a AN |
1184 | wl1271_warning("Unable to flush all TX buffers, " |
1185 | "timed out (timeout %d ms", | |
1186 | WL1271_TX_FLUSH_TIMEOUT / 1000); | |
18aa755b AN |
1187 | |
1188 | /* forcibly flush all Tx buffers on our queues */ | |
da08fdfa | 1189 | for (i = 0; i < wl->num_links; i++) |
18aa755b | 1190 | wl1271_tx_reset_link_queues(wl, i); |
2c38849f | 1191 | |
f83e5413 | 1192 | out_wake: |
2c38849f | 1193 | wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); |
f83e5413 AN |
1194 | mutex_unlock(&wl->mutex); |
1195 | out: | |
2c38849f | 1196 | mutex_unlock(&wl->flush_mutex); |
f5fc0f86 | 1197 | } |
a1c597f2 | 1198 | EXPORT_SYMBOL_GPL(wl1271_tx_flush); |
e0fe371b | 1199 | |
af7fbb28 | 1200 | u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) |
e0fe371b | 1201 | { |
af7fbb28 EP |
1202 | if (WARN_ON(!rate_set)) |
1203 | return 0; | |
e0fe371b | 1204 | |
af7fbb28 | 1205 | return BIT(__ffs(rate_set)); |
e0fe371b | 1206 | } |
78e28062 | 1207 | EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get); |
66396114 | 1208 | |
1c33db78 AN |
/*
 * Mark one (vif, queue) pair as stopped for the given reason; actually
 * stops the mac80211 hw queue only on the first reason set.
 * Caller must hold wl->wl_lock.
 */
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	/* remember whether any reason was already set before ours */
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, hwq);
}
1223 | ||
1c33db78 | 1224 | void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, |
66396114 AN |
1225 | enum wlcore_queue_stop_reason reason) |
1226 | { | |
1227 | unsigned long flags; | |
1228 | ||
1229 | spin_lock_irqsave(&wl->wl_lock, flags); | |
1c33db78 | 1230 | wlcore_stop_queue_locked(wl, wlvif, queue, reason); |
66396114 AN |
1231 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
1232 | } | |
1233 | ||
/*
 * Clear the given stop reason for one (vif, queue) pair; wakes the
 * mac80211 hw queue only when no other stop reason remains.
 */
void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));

	/* other reasons still pending - keep the queue stopped */
	if (wl->queue_stop_reasons[hwq])
		goto out;

	ieee80211_wake_queue(wl->hw, hwq);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1253 | ||
/*
 * Stop every hw queue of every possible vif for the given reason.
 */
void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know are stopped.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1274 | ||
1c33db78 AN |
/*
 * Clear the given stop reason on every hw queue of every possible vif
 * and wake them all.  Counterpart of wlcore_stop_queues().
 */
void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know are woken up.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1295 | ||
1c33db78 AN |
1296 | bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, |
1297 | struct wl12xx_vif *wlvif, u8 queue, | |
1298 | enum wlcore_queue_stop_reason reason) | |
d6037d22 AN |
1299 | { |
1300 | unsigned long flags; | |
1301 | bool stopped; | |
1302 | ||
1303 | spin_lock_irqsave(&wl->wl_lock, flags); | |
1304 | stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue, | |
1305 | reason); | |
1306 | spin_unlock_irqrestore(&wl->wl_lock, flags); | |
1307 | ||
1308 | return stopped; | |
1309 | } | |
1310 | ||
/*
 * Test whether one (vif, queue) pair is stopped for the given reason.
 * Caller must hold wl->wl_lock (asserted).
 */
bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif, u8 queue,
					      enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
1320 | ||
d6037d22 AN |
/*
 * Test whether one (vif, queue) pair is stopped for any reason.
 * Caller must hold wl->wl_lock (asserted).
 */
bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}