/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

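/*
 * Pick the first station index that is neither reserved nor already
 * mapped in fw_id_to_mac_id; returns IWL_MVM_STATION_COUNT if the table
 * is full.
 */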
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

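/*
 * Timer callback for RX BA sessions: if frames were still arriving within
 * twice the session timeout, re-arm the timer for the remaining time;
 * otherwise ask mac80211 to tear the BA session down.
 */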
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

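/*
 * Allocate and enable one TX queue per AC for a TDLS station. Only used
 * on the non-DQA path, where a TDLS station gets its own set of queues.
 */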
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

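/*
 * Return a bitmap of the TIDs mapped to this queue that currently have
 * an aggregation session (IWL_AGG_ON) open on it.
 */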
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and it doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

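/*
 * Free a queue that went inactive: tear down any aggregations still
 * mapped to it, disable it in the SCD and, when the queue is being
 * handed over to a different station, notify the firmware that it was
 * removed from the old station's queue set.
 */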
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

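/*
 * Pick an existing DATA queue that a new TID of AC @ac can share,
 * following the priority order documented inside the function.
 */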
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

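/*
 * Allocate a TX queue for the given station/TID pair: prefer a free
 * (or reserved) queue, fall back to sharing an existing one, then
 * enable it in the SCD and update the firmware's station entry.
 */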
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

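/*
 * Update the firmware about which TID "owns" a queue after the previous
 * owner TID was removed from it, by picking any TID still mapped there.
 */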
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

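/*
 * Turn a shared queue that is left with a single TID back into an
 * unshared one: redirect it to the TID's AC and re-enable aggregation
 * on it if the TID still has an open aggregation session.
 */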
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

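/*
 * Flush the frames that were deferred for this station/TID while no
 * queue was allocated: allocate a queue if needed, then transmit the
 * deferred frames, or free them if no queue could be allocated.
 */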
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

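/*
 * Worker servicing the DQA bookkeeping: unshare or re-own queues that
 * need it, then allocate queues for all stations with deferred traffic.
 */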
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

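/*
 * Reserve a TX queue for a station that is being added - the BSS client
 * queue for a non-TDLS client station if it is free, otherwise any free
 * DATA queue - so TX can later allocate it without running short.
 */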
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (or allocated) */
	mvm->queue_info[mvm_sta->reserved_queue].status =
		IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		cfg.tid = i;
		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-mapping sta %d tid %d to queue %d\n",
				    mvm_sta->sta_id, i, txq_id);

		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
				   &cfg, wdg_timeout);

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

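/*
 * Add a station to the driver and firmware tables: allocate a free
 * station index (reusing the old one on HW restart), set up its queues
 * and send the ADD_STA command to the firmware.
 */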
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only that: if
	 * this work ran concurrently with iwl_mvm_rm_sta, it could run before
	 * iwl_mvm_rm_sta sets the station as busy and exit; then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody would ever
	 * clean it up.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * so we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}

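/*
 * Disable every TXQ still allocated to a station on the DQA removal
 * path, and mark its per-TID queue ids as invalid again.
 */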
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}

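/*
 * Remove a station added by mac80211: drain and flush its TX queues,
 * release any reserved DQA queue, and either remove the station from
 * the firmware right away or, if frames are still pending for it, defer
 * the removal to iwl_mvm_sta_drained_wk.
 */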
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if ((vif->type == NL80211_IFTYPE_STATION &&
	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
	    iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		/* If there is a TXQ still marked as reserved - free it */
		if (iwl_mvm_is_dqa_supported(mvm) &&
		    mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
			u8 reserved_txq = mvm_sta->reserved_queue;
			enum iwl_mvm_queue_status *status;

			/*
			 * If no traffic has gone through the reserved TXQ - it
			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
			 * should be manually marked as free again
			 */
			spin_lock_bh(&mvm->queue_info_lock);
			status = &mvm->queue_info[reserved_txq].status;
			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
				 (*status != IWL_MVM_QUEUE_FREE),
				 "sta_id %d reserved txq %d status %d",
				 mvm_sta->sta_id, reserved_txq, *status)) {
				spin_unlock_bh(&mvm->queue_info_lock);
				return -EINVAL;
			}

			*status = IWL_MVM_QUEUE_FREE;
			spin_unlock_bh(&mvm->queue_info_lock);
		}

		if (vif->type == NL80211_IFTYPE_STATION &&
		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
			/* if associated - we can't remove the AP STA now */
			if (vif->bss_conf.assoc)
				return ret;

			/* unassoc - go ahead - remove the AP STA now */
			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

			/* clear d0i3_ap_sta_id if no longer relevant */
			if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		}
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

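/*
 * Remove a station from the firmware by its index alone; unlike
 * iwl_mvm_rm_sta, no draining or queue flushing is done here.
 */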
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

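/*
 * Reserve a station table entry for an internal (driver-managed)
 * station. The entry is set to ERR_PTR(-EINVAL) rather than left NULL
 * so that iterations over fw_id_to_mac_id don't stop at it.
 */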
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}

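/*
 * Send ADD_STA for an internal station. All TIDs start with TX
 * disabled, and the MAC address is only filled in when one is provided
 * (the aux station, for instance, has none).
 */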
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

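/*
 * Add the auxiliary station. Note the ordering constraint: in non-DQA
 * mode the aux queue must be mapped to its fifo before the station is
 * added, while in DQA mode the queue is enabled only once the station
 * id has been allocated.
 */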
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_is_dqa_supported(mvm))
		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	}

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

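/*
 * Notify all RX queues, synchronously, that a BA session was torn down,
 * so that none of them keeps using the reorder buffer that is about to
 * be freed.
 */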
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This covers a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}

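/*
 * Start or stop an RX BlockAck session for a station/TID. On start, on
 * devices with the new (multi-queue) RX API, the per-queue reorder
 * buffers and the session timer are allocated before the firmware is
 * told, so an allocation failure can't leave a half-started session.
 */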
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		setup_timer(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired,
			    (unsigned long)&mvm->baid_map[baid]);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

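/*
 * Update the firmware's TX aggregation state for one TID via ADD_STA:
 * on start the queue joins the station's TFD mask and TX is enabled for
 * the TID; on stop the TID is disabled again (the queue itself is only
 * dropped from the mask in non-DQA mode).
 */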
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

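/*
 * Called when mac80211 wants to open a TX aggregation session: pick (or
 * reuse, in DQA mode) a TXQ for the TID, then either signal readiness
 * for the ADDBA handshake right away or wait until the previously
 * queued frames have been reclaimed.
 */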
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *     one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as
	 *     in non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
		   mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}
		/*
		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
		 * an inactive queue from iwl_mvm_find_free_queue() is
		 * certainly a bug
		 */
		WARN_ON(mvm->queue_info[txq_id].status ==
			IWL_MVM_QUEUE_INACTIVE);

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

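/*
 * Complete TX aggregation setup once the ADDBA handshake is done:
 * (re)configure the queue for the negotiated window size, enable
 * aggregation in the firmware, and update the rate-scaling frame limit.
 */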
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

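/*
 * Install or remove the IGTK (the management group key) through the
 * dedicated MGMT_MCAST_KEY command; firmware without the new RX API
 * only understands the v1 command layout and the CMAC cipher.
 */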
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

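/*
 * Top-level key installation entry point, called with mvm->mutex held
 * (presumably from the mac80211 set_key() path). In the normal case the
 * caller passes STA_KEY_IDX_INVALID so that a free firmware key offset is
 * allocated here, e.g. (illustrative only):
 *
 *	ret = iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
 *				  STA_KEY_IDX_INVALID);
 *
 * whereas the D3 and HW_RESTART paths pass a pre-assigned offset to reuse.
 */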
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, in which case
	 * we need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hard-code the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

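/*
 * Counterpart of iwl_mvm_set_sta_key(): frees the offset in the local
 * fw_key_table bookkeeping and removes the key from the firmware. If the
 * station is already gone this still returns 0, since the host-side key
 * slot has been released by then.
 */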
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

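/*
 * Push a fresh TKIP phase-1 RX key to the firmware, typically on IV32
 * rollover. This runs under RCU rather than mvm->mutex and uses CMD_ASYNC,
 * which suggests it may be called from an atomic (RX) context.
 */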
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

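/*
 * Clear the PS flag for a station that woke up: station_flags is left at
 * zero while station_flags_msk selects STA_FLG_PS, so the flag is cleared.
 * The command is sent asynchronously, so no waiting is done here.
 */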
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

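/*
 * Release buffered frames for a sleeping station during a service period:
 * tell the firmware how many frames it may send ('cnt'), on which ACs,
 * and whether more data remains. For aggregation queues the count is
 * recomputed from what is actually queued, capping it and setting
 * more_data when the queues hold more than the service period allows.
 */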
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK.
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if all
	 * the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updates the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

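/*
 * Handle the firmware's end-of-service-period notification: look up the
 * station by firmware id under RCU and forward the EOSP indication to
 * mac80211.
 */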
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

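/*
 * Set or clear STA_FLG_DISABLE_TX for one station, telling the firmware
 * to stop or resume transmitting to it.
 */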
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

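/*
 * Wrapper around iwl_mvm_sta_modify_disable_tx() that tracks the current
 * state under mvm_sta->lock, skips redundant updates, and mirrors the
 * state into mac80211 via ieee80211_sta_block_awake().
 */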
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

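/*
 * Apply the disable-tx state to every station belonging to the given
 * interface, matching stations by their mac_id_n_color.
 */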
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

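/*
 * Per the name, called when the client goes absent during a channel
 * switch: disable tx to this interface's AP station, with the lookup done
 * under RCU protection.
 */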
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}