1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65 #include <net/mac80211.h>
66
67 #include "mvm.h"
68 #include "sta.h"
69 #include "rs.h"
70
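/*
 * Find the first firmware station ID that is neither reserved nor already
 * mapped to a station; returns IWL_MVM_STATION_COUNT if the table is full.
 */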
71 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
72 enum nl80211_iftype iftype)
73 {
74 int sta_id;
75 u32 reserved_ids = 0;
76
77 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
78 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
79
80 lockdep_assert_held(&mvm->mutex);
81
82 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
83 if (iftype != NL80211_IFTYPE_STATION)
84 reserved_ids = BIT(0);
85
86 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
87 for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
88 if (BIT(sta_id) & reserved_ids)
89 continue;
90
91 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
92 lockdep_is_held(&mvm->mutex)))
93 return sta_id;
94 }
95 return IWL_MVM_STATION_COUNT;
96 }
97
98 /* send station add/update command to firmware */
99 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
100 bool update)
101 {
102 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
103 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
104 .sta_id = mvm_sta->sta_id,
105 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
106 .add_modify = update ? 1 : 0,
107 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
108 STA_FLG_MIMO_EN_MSK),
109 };
110 int ret;
111 u32 status;
112 u32 agg_size = 0, mpdu_dens = 0;
113
114 if (!update) {
115 add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
116 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
117 }
118
119 switch (sta->bandwidth) {
120 case IEEE80211_STA_RX_BW_160:
121 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
122 /* fall through */
123 case IEEE80211_STA_RX_BW_80:
124 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
125 /* fall through */
126 case IEEE80211_STA_RX_BW_40:
127 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
128 /* fall through */
129 case IEEE80211_STA_RX_BW_20:
130 if (sta->ht_cap.ht_supported)
131 add_sta_cmd.station_flags |=
132 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
133 break;
134 }
135
136 switch (sta->rx_nss) {
137 case 1:
138 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
139 break;
140 case 2:
141 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
142 break;
143 case 3 ... 8:
144 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
145 break;
146 }
147
148 switch (sta->smps_mode) {
149 case IEEE80211_SMPS_AUTOMATIC:
150 case IEEE80211_SMPS_NUM_MODES:
151 WARN_ON(1);
152 break;
153 case IEEE80211_SMPS_STATIC:
154 /* override NSS */
155 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
156 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
157 break;
158 case IEEE80211_SMPS_DYNAMIC:
159 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
160 break;
161 case IEEE80211_SMPS_OFF:
162 /* nothing */
163 break;
164 }
165
166 if (sta->ht_cap.ht_supported) {
167 add_sta_cmd.station_flags_msk |=
168 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
169 STA_FLG_AGG_MPDU_DENS_MSK);
170
171 mpdu_dens = sta->ht_cap.ampdu_density;
172 }
173
174 if (sta->vht_cap.vht_supported) {
175 agg_size = sta->vht_cap.cap &
176 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
177 agg_size >>=
178 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
179 } else if (sta->ht_cap.ht_supported) {
180 agg_size = sta->ht_cap.ampdu_factor;
181 }
182
183 add_sta_cmd.station_flags |=
184 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
185 add_sta_cmd.station_flags |=
186 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
187
188 status = ADD_STA_SUCCESS;
189 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
190 &add_sta_cmd, &status);
191 if (ret)
192 return ret;
193
194 switch (status) {
195 case ADD_STA_SUCCESS:
196 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
197 break;
198 default:
199 ret = -EIO;
200 IWL_ERR(mvm, "ADD_STA failed\n");
201 break;
202 }
203
204 return ret;
205 }
206
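/*
 * Allocate a dedicated hardware TX queue per AC for a TDLS peer, enable the
 * queues and record them in the station's TFD queue mask.
 */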
207 static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
208 struct ieee80211_sta *sta)
209 {
210 unsigned long used_hw_queues;
211 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
212 u32 ac;
213
214 lockdep_assert_held(&mvm->mutex);
215
216 used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
217
218 /* Find available queues, and allocate them to the ACs */
219 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
220 u8 queue = find_first_zero_bit(&used_hw_queues,
221 mvm->first_agg_queue);
222
223 if (queue >= mvm->first_agg_queue) {
224 IWL_ERR(mvm, "Failed to allocate STA queue\n");
225 return -EBUSY;
226 }
227
228 __set_bit(queue, &used_hw_queues);
229 mvmsta->hw_queue[ac] = queue;
230 }
231
232 /* Found a place for all queues - enable them */
233 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
234 iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
235 iwl_mvm_ac_to_tx_fifo[ac]);
236 mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
237 }
238
239 return 0;
240 }
241
242 static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
243 struct ieee80211_sta *sta)
244 {
245 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
246 unsigned long sta_msk;
247 int i;
248
249 lockdep_assert_held(&mvm->mutex);
250
251 /* disable the TDLS STA-specific queues */
252 sta_msk = mvmsta->tfd_queue_msk;
253 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
254 iwl_mvm_disable_txq(mvm, i);
255 }
256
257 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
258 struct ieee80211_vif *vif,
259 struct ieee80211_sta *sta)
260 {
261 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
262 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
263 int i, ret, sta_id;
264
265 lockdep_assert_held(&mvm->mutex);
266
267 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
268 sta_id = iwl_mvm_find_free_sta_id(mvm,
269 ieee80211_vif_type_p2p(vif));
270 else
271 sta_id = mvm_sta->sta_id;
272
273 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
274 return -ENOSPC;
275
276 spin_lock_init(&mvm_sta->lock);
277
278 mvm_sta->sta_id = sta_id;
279 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
280 mvmvif->color);
281 mvm_sta->vif = vif;
282 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
283 mvm_sta->tx_protection = 0;
284 mvm_sta->tt_tx_protection = false;
285
286 /* HW restart, don't assume the memory has been zeroed */
287 atomic_set(&mvm->pending_frames[sta_id], 0);
288 mvm_sta->tid_disable_agg = 0;
289 mvm_sta->tfd_queue_msk = 0;
290
291 /* allocate new queues for a TDLS station */
292 if (sta->tdls) {
293 ret = iwl_mvm_tdls_sta_init(mvm, sta);
294 if (ret)
295 return ret;
296 } else {
297 for (i = 0; i < IEEE80211_NUM_ACS; i++)
298 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
299 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
300 }
301
302 /* for HW restart - reset everything but the sequence number */
303 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
304 u16 seq = mvm_sta->tid_data[i].seq_number;
305 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
306 mvm_sta->tid_data[i].seq_number = seq;
307 }
308 mvm_sta->agg_tids = 0;
309
310 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
311 if (ret)
312 goto err;
313
314 if (vif->type == NL80211_IFTYPE_STATION) {
315 if (!sta->tdls) {
316 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
317 mvmvif->ap_sta_id = sta_id;
318 } else {
319 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
320 }
321 }
322
323 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
324
325 return 0;
326
327 err:
328 iwl_mvm_tdls_sta_deinit(mvm, sta);
329 return ret;
330 }
331
332 int iwl_mvm_update_sta(struct iwl_mvm *mvm,
333 struct ieee80211_vif *vif,
334 struct ieee80211_sta *sta)
335 {
336 return iwl_mvm_sta_send_to_fw(mvm, sta, true);
337 }
338
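/*
 * Ask the firmware to start (or stop) draining the frames pending for this
 * station by toggling STA_FLG_DRAIN_FLOW via an ADD_STA modify command.
 */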
339 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
340 bool drain)
341 {
342 struct iwl_mvm_add_sta_cmd cmd = {};
343 int ret;
344 u32 status;
345
346 lockdep_assert_held(&mvm->mutex);
347
348 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
349 cmd.sta_id = mvmsta->sta_id;
350 cmd.add_modify = STA_MODE_MODIFY;
351 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
352 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
353
354 status = ADD_STA_SUCCESS;
355 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
356 &cmd, &status);
357 if (ret)
358 return ret;
359
360 switch (status) {
361 case ADD_STA_SUCCESS:
362 IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
363 mvmsta->sta_id);
364 break;
365 default:
366 ret = -EIO;
367 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
368 mvmsta->sta_id);
369 break;
370 }
371
372 return ret;
373 }
374
375 /*
376 * Remove a station from the FW table. Before sending the command to remove
377 * the station validate that the station is indeed known to the driver (sanity
378 * only).
379 */
380 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
381 {
382 struct ieee80211_sta *sta;
383 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
384 .sta_id = sta_id,
385 };
386 int ret;
387
388 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
389 lockdep_is_held(&mvm->mutex));
390
391 /* Note: internal stations are marked as error values */
392 if (!sta) {
393 IWL_ERR(mvm, "Invalid station id\n");
394 return -EINVAL;
395 }
396
397 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
398 sizeof(rm_sta_cmd), &rm_sta_cmd);
399 if (ret) {
400 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
401 return ret;
402 }
403
404 return 0;
405 }
406
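/*
 * Worker that finishes the removal of stations that were kept around (marked
 * -EBUSY) until the firmware drained their pending frames.
 */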
407 void iwl_mvm_sta_drained_wk(struct work_struct *wk)
408 {
409 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
410 u8 sta_id;
411
412 /*
413 * The mutex is needed because of the SYNC cmd, but not only that: if
414 * this work ran concurrently with iwl_mvm_rm_sta, it could run before
415 * iwl_mvm_rm_sta marks the station as busy and exit early. Then
416 * iwl_mvm_rm_sta would mark the station as busy, and nobody would ever
417 * clean it up.
418 */
419 mutex_lock(&mvm->mutex);
420
421 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
422 int ret;
423 struct ieee80211_sta *sta =
424 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
425 lockdep_is_held(&mvm->mutex));
426
427 /*
428 * This station is in use or RCU-removed; the latter happens in
429 * managed mode, where mac80211 removes the station before we
430 * can remove it from firmware (we can only do that after the
431 * MAC is marked unassociated), and possibly while the deauth
432 * frame to disconnect from the AP is still queued. Then, the
433 * station pointer is -ENOENT when the last skb is reclaimed.
434 */
435 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
436 continue;
437
438 if (PTR_ERR(sta) == -EINVAL) {
439 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
440 sta_id);
441 continue;
442 }
443
444 if (!sta) {
445 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
446 sta_id);
447 continue;
448 }
449
450 WARN_ON(PTR_ERR(sta) != -EBUSY);
451 /* This station was removed and we waited until it got drained,
452 * we can now proceed and remove it.
453 */
454 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
455 if (ret) {
456 IWL_ERR(mvm,
457 "Couldn't remove sta %d after it was drained\n",
458 sta_id);
459 continue;
460 }
461 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
462 clear_bit(sta_id, mvm->sta_drained);
463
464 if (mvm->tfd_drained[sta_id]) {
465 unsigned long i, msk = mvm->tfd_drained[sta_id];
466
467 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
468 iwl_mvm_disable_txq(mvm, i);
469
470 mvm->tfd_drained[sta_id] = 0;
471 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
472 sta_id, msk);
473 }
474 }
475
476 mutex_unlock(&mvm->mutex);
477 }
478
479 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
480 struct ieee80211_vif *vif,
481 struct ieee80211_sta *sta)
482 {
483 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
484 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
485 int ret;
486
487 lockdep_assert_held(&mvm->mutex);
488
489 if (vif->type == NL80211_IFTYPE_STATION &&
490 mvmvif->ap_sta_id == mvm_sta->sta_id) {
491 /* flush its queues here since we are freeing mvm_sta */
492 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
493
494 /* if we are associated - we can't remove the AP STA now */
495 if (vif->bss_conf.assoc)
496 return ret;
497
498 /* unassoc - go ahead - remove the AP STA now */
499 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
500
501 /* clear d0i3_ap_sta_id if no longer relevant */
502 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
503 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
504 }
505
506 /*
507 * Make sure that the tx response code sees the station as -EBUSY and
508 * calls the drain worker.
509 */
510 spin_lock_bh(&mvm_sta->lock);
511 /*
512 * There are frames pending on the AC queues for this station.
513 * We need to wait until all the frames are drained...
514 */
515 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
516 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
517 ERR_PTR(-EBUSY));
518 spin_unlock_bh(&mvm_sta->lock);
519
520 /* disable TDLS sta queues on drain complete */
521 if (sta->tdls) {
522 mvm->tfd_drained[mvm_sta->sta_id] =
523 mvm_sta->tfd_queue_msk;
524 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
525 mvm_sta->sta_id);
526 }
527
528 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
529 } else {
530 spin_unlock_bh(&mvm_sta->lock);
531
532 if (sta->tdls)
533 iwl_mvm_tdls_sta_deinit(mvm, sta);
534
535 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
536 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
537 }
538
539 return ret;
540 }
541
542 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
543 struct ieee80211_vif *vif,
544 u8 sta_id)
545 {
546 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
547
548 lockdep_assert_held(&mvm->mutex);
549
550 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
551 return ret;
552 }
553
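/*
 * Reserve a station ID and queue mask for an internal (driver-owned) station
 * such as the aux or broadcast station; no command is sent to the firmware.
 */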
554 static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
555 struct iwl_mvm_int_sta *sta,
556 u32 qmask, enum nl80211_iftype iftype)
557 {
558 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
559 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
560 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
561 return -ENOSPC;
562 }
563
564 sta->tfd_queue_msk = qmask;
565
566 /* put a non-NULL value so iterating over the stations won't stop */
567 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
568 return 0;
569 }
570
571 static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
572 struct iwl_mvm_int_sta *sta)
573 {
574 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
575 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
576 sta->sta_id = IWL_MVM_STATION_COUNT;
577 }
578
579 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
580 struct iwl_mvm_int_sta *sta,
581 const u8 *addr,
582 u16 mac_id, u16 color)
583 {
584 struct iwl_mvm_add_sta_cmd cmd;
585 int ret;
586 u32 status;
587
588 lockdep_assert_held(&mvm->mutex);
589
590 memset(&cmd, 0, sizeof(cmd));
591 cmd.sta_id = sta->sta_id;
592 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
593 color));
594
595 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
596
597 if (addr)
598 memcpy(cmd.addr, addr, ETH_ALEN);
599
600 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
601 &cmd, &status);
602 if (ret)
603 return ret;
604
605 switch (status) {
606 case ADD_STA_SUCCESS:
607 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
608 return 0;
609 default:
610 ret = -EIO;
611 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
612 status);
613 break;
614 }
615 return ret;
616 }
617
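/* Add the auxiliary station to the firmware and bind it to the aux queue. */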
618 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
619 {
620 int ret;
621
622 lockdep_assert_held(&mvm->mutex);
623
624 /* Map Aux queue to fifo - needs to happen before adding Aux station */
625 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
626 IWL_MVM_TX_FIFO_MCAST);
627
628 /* Allocate the aux station and assign the aux queue to it */
629 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
630 NL80211_IFTYPE_UNSPECIFIED);
631 if (ret)
632 return ret;
633
634 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
635 MAC_INDEX_AUX, 0);
636
637 if (ret)
638 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
639 return ret;
640 }
641
642 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
643 {
644 lockdep_assert_held(&mvm->mutex);
645
646 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
647 }
648
649 /*
650 * Send the add station command for the vif's broadcast station.
651 * Assumes that the station was already allocated.
652 *
653 * @mvm: the mvm component
654 * @vif: the interface to which the broadcast station is added
655 * The broadcast station itself is embedded in the vif's mvmvif->bcast_sta.
656 */
657 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
658 {
659 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
660 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
661 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
662 const u8 *baddr = _baddr;
663
664 lockdep_assert_held(&mvm->mutex);
665
666 if (vif->type == NL80211_IFTYPE_ADHOC)
667 baddr = vif->bss_conf.bssid;
668
669 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
670 return -ENOSPC;
671
672 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
673 mvmvif->id, mvmvif->color);
674 }
675
676 /* Send the FW a request to remove the station from its internal data
677 * structures, but DO NOT remove the entry from the local data structures. */
678 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
679 {
680 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
681 int ret;
682
683 lockdep_assert_held(&mvm->mutex);
684
685 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
686 if (ret)
687 IWL_WARN(mvm, "Failed sending remove station\n");
688 return ret;
689 }
690
691 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
692 {
693 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
694 u32 qmask;
695
696 lockdep_assert_held(&mvm->mutex);
697
698 qmask = iwl_mvm_mac_get_queues_mask(vif);
699
700 /*
701 * The firmware defines the TFD queue mask to only be relevant
702 * for *unicast* queues, so the multicast (CAB) queue shouldn't
703 * be included.
704 */
705 if (vif->type == NL80211_IFTYPE_AP)
706 qmask &= ~BIT(vif->cab_queue);
707
708 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
709 ieee80211_vif_type_p2p(vif));
710 }
711
712 /* Allocate a new station entry for the broadcast station to the given vif,
713 * and send it to the FW.
714 * Note that each P2P MAC should have its own broadcast station.
715 *
716 * @mvm: the mvm component
717 * @vif: the interface to which the broadcast station is added
718 * The broadcast station itself is embedded in mvmvif->bcast_sta. */
719 int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
720 {
721 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
722 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
723 int ret;
724
725 lockdep_assert_held(&mvm->mutex);
726
727 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
728 if (ret)
729 return ret;
730
731 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
732
733 if (ret)
734 iwl_mvm_dealloc_int_sta(mvm, bsta);
735
736 return ret;
737 }
738
739 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
740 {
741 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
742
743 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
744 }
745
746 /*
747 * Send the FW a request to remove the station from its internal data
748 * structures, and in addition remove it from the local data structure.
749 */
750 int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
751 {
752 int ret;
753
754 lockdep_assert_held(&mvm->mutex);
755
756 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
757
758 iwl_mvm_dealloc_bcast_sta(mvm, vif);
759
760 return ret;
761 }
762
763 #define IWL_MAX_RX_BA_SESSIONS 16
764
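/*
 * Start or stop an RX BA session for the given TID. The number of active
 * sessions is capped at IWL_MAX_RX_BA_SESSIONS and tracked in rx_ba_sessions.
 */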
765 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
766 int tid, u16 ssn, bool start)
767 {
768 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
769 struct iwl_mvm_add_sta_cmd cmd = {};
770 int ret;
771 u32 status;
772
773 lockdep_assert_held(&mvm->mutex);
774
775 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
776 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
777 return -ENOSPC;
778 }
779
780 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
781 cmd.sta_id = mvm_sta->sta_id;
782 cmd.add_modify = STA_MODE_MODIFY;
783 if (start) {
784 cmd.add_immediate_ba_tid = (u8) tid;
785 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
786 } else {
787 cmd.remove_immediate_ba_tid = (u8) tid;
788 }
789 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
790 STA_MODIFY_REMOVE_BA_TID;
791
792 status = ADD_STA_SUCCESS;
793 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
794 &cmd, &status);
795 if (ret)
796 return ret;
797
798 switch (status) {
799 case ADD_STA_SUCCESS:
800 IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
801 start ? "start" : "stopp");
802 break;
803 case ADD_STA_IMMEDIATE_BA_FAILURE:
804 IWL_WARN(mvm, "RX BA Session refused by fw\n");
805 ret = -ENOSPC;
806 break;
807 default:
808 ret = -EIO;
809 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
810 start ? "start" : "stopp", status);
811 break;
812 }
813
814 if (!ret) {
815 if (start)
816 mvm->rx_ba_sessions++;
817 else if (mvm->rx_ba_sessions > 0)
818 /* check that restart flow didn't zero the counter */
819 mvm->rx_ba_sessions--;
820 }
821
822 return ret;
823 }
824
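/*
 * Update the station's TFD queue mask and per-TID aggregation state in the
 * firmware when a TX aggregation queue is attached to or detached from a TID.
 */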
825 static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
826 int tid, u8 queue, bool start)
827 {
828 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
829 struct iwl_mvm_add_sta_cmd cmd = {};
830 int ret;
831 u32 status;
832
833 lockdep_assert_held(&mvm->mutex);
834
835 if (start) {
836 mvm_sta->tfd_queue_msk |= BIT(queue);
837 mvm_sta->tid_disable_agg &= ~BIT(tid);
838 } else {
839 mvm_sta->tfd_queue_msk &= ~BIT(queue);
840 mvm_sta->tid_disable_agg |= BIT(tid);
841 }
842
843 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
844 cmd.sta_id = mvm_sta->sta_id;
845 cmd.add_modify = STA_MODE_MODIFY;
846 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
847 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
848 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
849
850 status = ADD_STA_SUCCESS;
851 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
852 &cmd, &status);
853 if (ret)
854 return ret;
855
856 switch (status) {
857 case ADD_STA_SUCCESS:
858 break;
859 default:
860 ret = -EIO;
861 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
862 start ? "start" : "stopp", status);
863 break;
864 }
865
866 return ret;
867 }
868
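/* Map each TID to the corresponding mac80211 access category (802.11 QoS). */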
869 const u8 tid_to_mac80211_ac[] = {
870 IEEE80211_AC_BE,
871 IEEE80211_AC_BK,
872 IEEE80211_AC_BK,
873 IEEE80211_AC_BE,
874 IEEE80211_AC_VI,
875 IEEE80211_AC_VI,
876 IEEE80211_AC_VO,
877 IEEE80211_AC_VO,
878 };
879
880 static const u8 tid_to_ucode_ac[] = {
881 AC_BE,
882 AC_BK,
883 AC_BK,
884 AC_BE,
885 AC_VI,
886 AC_VI,
887 AC_VO,
888 AC_VO,
889 };
890
891 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
892 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
893 {
894 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
895 struct iwl_mvm_tid_data *tid_data;
896 int txq_id;
897
898 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
899 return -EINVAL;
900
901 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
902 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
903 mvmsta->tid_data[tid].state);
904 return -ENXIO;
905 }
906
907 lockdep_assert_held(&mvm->mutex);
908
909 for (txq_id = mvm->first_agg_queue;
910 txq_id <= mvm->last_agg_queue; txq_id++)
911 if (mvm->queue_to_mac80211[txq_id] ==
912 IWL_INVALID_MAC80211_QUEUE)
913 break;
914
915 if (txq_id > mvm->last_agg_queue) {
916 IWL_ERR(mvm, "Failed to allocate agg queue\n");
917 return -EIO;
918 }
919
920 spin_lock_bh(&mvmsta->lock);
921
922 /* possible race condition - we entered D0i3 while starting agg */
923 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
924 spin_unlock_bh(&mvmsta->lock);
925 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
926 return -EIO;
927 }
928
929 /* the new tx queue is still connected to the same mac80211 queue */
930 mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
931
932 tid_data = &mvmsta->tid_data[tid];
933 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
934 tid_data->txq_id = txq_id;
935 *ssn = tid_data->ssn;
936
937 IWL_DEBUG_TX_QUEUES(mvm,
938 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
939 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
940 tid_data->next_reclaimed);
941
942 if (tid_data->ssn == tid_data->next_reclaimed) {
943 tid_data->state = IWL_AGG_STARTING;
944 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
945 } else {
946 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
947 }
948
949 spin_unlock_bh(&mvmsta->lock);
950
951 return 0;
952 }
953
954 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
955 struct ieee80211_sta *sta, u16 tid, u8 buf_size)
956 {
957 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
958 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
959 int queue, fifo, ret;
960 u16 ssn;
961
962 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
963 != IWL_MAX_TID_COUNT);
964
965 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
966
967 spin_lock_bh(&mvmsta->lock);
968 ssn = tid_data->ssn;
969 queue = tid_data->txq_id;
970 tid_data->state = IWL_AGG_ON;
971 mvmsta->agg_tids |= BIT(tid);
972 tid_data->ssn = 0xffff;
973 spin_unlock_bh(&mvmsta->lock);
974
975 fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
976
977 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
978 if (ret)
979 return -EIO;
980
981 iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
982 buf_size, ssn);
983
984 /*
985 * Even though in theory the peer could have different
986 * aggregation reorder buffer sizes for different sessions,
987 * our ucode doesn't allow for that and has a global limit
988 * for each station. Therefore, use the minimum of all the
989 * aggregation sessions and our default value.
990 */
991 mvmsta->max_agg_bufsize =
992 min(mvmsta->max_agg_bufsize, buf_size);
993 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
994
995 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
996 sta->addr, tid);
997
998 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
999 }
1000
1001 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1002 struct ieee80211_sta *sta, u16 tid)
1003 {
1004 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1005 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1006 u16 txq_id;
1007 int err;
1008
1009
1010 /*
1011 * If mac80211 is cleaning its state, then say that we finished since
1012 * our state has been cleared anyway.
1013 */
1014 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1015 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1016 return 0;
1017 }
1018
1019 spin_lock_bh(&mvmsta->lock);
1020
1021 txq_id = tid_data->txq_id;
1022
1023 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
1024 mvmsta->sta_id, tid, txq_id, tid_data->state);
1025
1026 mvmsta->agg_tids &= ~BIT(tid);
1027
1028 switch (tid_data->state) {
1029 case IWL_AGG_ON:
1030 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1031
1032 IWL_DEBUG_TX_QUEUES(mvm,
1033 "ssn = %d, next_recl = %d\n",
1034 tid_data->ssn, tid_data->next_reclaimed);
1035
1036 /* There are still packets for this RA / TID in the HW */
1037 if (tid_data->ssn != tid_data->next_reclaimed) {
1038 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
1039 err = 0;
1040 break;
1041 }
1042
1043 tid_data->ssn = 0xffff;
1044 tid_data->state = IWL_AGG_OFF;
1045 mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
1046 spin_unlock_bh(&mvmsta->lock);
1047
1048 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1049
1050 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1051
1052 iwl_mvm_disable_txq(mvm, txq_id);
1053 return 0;
1054 case IWL_AGG_STARTING:
1055 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1056 /*
1057 * The agg session has been stopped before it was set up. This
1058 * can happen when the AddBA timer times out for example.
1059 */
1060
1061 /* No barriers since we are under mutex */
1062 lockdep_assert_held(&mvm->mutex);
1063 mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
1064
1065 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1066 tid_data->state = IWL_AGG_OFF;
1067 err = 0;
1068 break;
1069 default:
1070 IWL_ERR(mvm,
1071 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
1072 mvmsta->sta_id, tid, tid_data->state);
1073 IWL_ERR(mvm,
1074 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
1075 err = -EINVAL;
1076 }
1077
1078 spin_unlock_bh(&mvmsta->lock);
1079
1080 return err;
1081 }
1082
1083 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1084 struct ieee80211_sta *sta, u16 tid)
1085 {
1086 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1087 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1088 u16 txq_id;
1089 enum iwl_mvm_agg_state old_state;
1090
1091 /*
1092 * First set the agg state to OFF to avoid calling
1093 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
1094 */
1095 spin_lock_bh(&mvmsta->lock);
1096 txq_id = tid_data->txq_id;
1097 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
1098 mvmsta->sta_id, tid, txq_id, tid_data->state);
1099 old_state = tid_data->state;
1100 tid_data->state = IWL_AGG_OFF;
1101 mvmsta->agg_tids &= ~BIT(tid);
1102 spin_unlock_bh(&mvmsta->lock);
1103
1104 if (old_state >= IWL_AGG_ON) {
1105 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
1106 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
1107
1108 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1109
1110 iwl_mvm_disable_txq(mvm, tid_data->txq_id);
1111 }
1112
1113 mvm->queue_to_mac80211[tid_data->txq_id] =
1114 IWL_INVALID_MAC80211_QUEUE;
1115
1116 return 0;
1117 }
1118
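/* Find a free slot in the firmware key table and mark it as used. */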
1119 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1120 {
1121 int i;
1122
1123 lockdep_assert_held(&mvm->mutex);
1124
1125 i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
1126
1127 if (i == STA_KEY_MAX_NUM)
1128 return STA_KEY_IDX_INVALID;
1129
1130 __set_bit(i, mvm->fw_key_table);
1131
1132 return i;
1133 }
1134
1135 static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1136 struct ieee80211_sta *sta)
1137 {
1138 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
1139
1140 if (sta) {
1141 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
1142
1143 return mvm_sta->sta_id;
1144 }
1145
1146 /*
1147 * The device expects GTKs for station interfaces to be
1148 * installed as GTKs for the AP station. If we have no
1149 * station ID, then use AP's station ID.
1150 */
1151 if (vif->type == NL80211_IFTYPE_STATION &&
1152 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
1153 return mvmvif->ap_sta_id;
1154
1155 return IWL_MVM_STATION_COUNT;
1156 }
1157
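/*
 * Build and send the ADD_STA_KEY command that installs a unicast or multicast
 * key for a station; TKIP additionally needs the phase-1 key from mac80211.
 */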
1158 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1159 struct iwl_mvm_sta *mvm_sta,
1160 struct ieee80211_key_conf *keyconf, bool mcast,
1161 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
1162 {
1163 struct iwl_mvm_add_sta_key_cmd cmd = {};
1164 __le16 key_flags;
1165 int ret;
1166 u32 status;
1167 u16 keyidx;
1168 int i;
1169 u8 sta_id = mvm_sta->sta_id;
1170
1171 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1172 STA_KEY_FLG_KEYID_MSK;
1173 key_flags = cpu_to_le16(keyidx);
1174 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
1175
1176 switch (keyconf->cipher) {
1177 case WLAN_CIPHER_SUITE_TKIP:
1178 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
1179 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
1180 for (i = 0; i < 5; i++)
1181 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
1182 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1183 break;
1184 case WLAN_CIPHER_SUITE_CCMP:
1185 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
1186 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1187 break;
1188 case WLAN_CIPHER_SUITE_WEP104:
1189 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
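/* fall through */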
1190 case WLAN_CIPHER_SUITE_WEP40:
1191 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
1192 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
1193 break;
1194 default:
1195 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
1196 memcpy(cmd.key, keyconf->key, keyconf->keylen);
1197 }
1198
1199 if (mcast)
1200 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1201
1202 cmd.key_offset = keyconf->hw_key_idx;
1203 cmd.key_flags = key_flags;
1204 cmd.sta_id = sta_id;
1205
1206 status = ADD_STA_SUCCESS;
1207 if (cmd_flags & CMD_ASYNC)
1208 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
1209 sizeof(cmd), &cmd);
1210 else
1211 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1212 &cmd, &status);
1213
1214 switch (status) {
1215 case ADD_STA_SUCCESS:
1216 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
1217 break;
1218 default:
1219 ret = -EIO;
1220 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
1221 break;
1222 }
1223
1224 return ret;
1225 }
1226
1227 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
1228 struct ieee80211_key_conf *keyconf,
1229 u8 sta_id, bool remove_key)
1230 {
1231 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
1232
1233 /* verify the key details match the required command's expectations */
1234 if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
1235 (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
1236 (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
1237 return -EINVAL;
1238
1239 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
1240 igtk_cmd.sta_id = cpu_to_le32(sta_id);
1241
1242 if (remove_key) {
1243 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
1244 } else {
1245 struct ieee80211_key_seq seq;
1246 const u8 *pn;
1247
1248 memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
1249 ieee80211_aes_cmac_calculate_k1_k2(keyconf,
1250 igtk_cmd.K1, igtk_cmd.K2);
1251 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1252 pn = seq.aes_cmac.pn;
1253 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
1254 ((u64) pn[4] << 8) |
1255 ((u64) pn[3] << 16) |
1256 ((u64) pn[2] << 24) |
1257 ((u64) pn[1] << 32) |
1258 ((u64) pn[0] << 40));
1259 }
1260
1261 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
1262 remove_key ? "removing" : "installing",
1263 igtk_cmd.sta_id);
1264
1265 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
1266 sizeof(igtk_cmd), &igtk_cmd);
1267 }
1268
1269
1270 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
1271 struct ieee80211_vif *vif,
1272 struct ieee80211_sta *sta)
1273 {
1274 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
1275
1276 if (sta)
1277 return sta->addr;
1278
1279 if (vif->type == NL80211_IFTYPE_STATION &&
1280 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1281 u8 sta_id = mvmvif->ap_sta_id;
1282 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1283 lockdep_is_held(&mvm->mutex));
1284 return sta->addr;
1285 }
1286
1287
1288 return NULL;
1289 }
1290
1291 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1292 struct ieee80211_vif *vif,
1293 struct ieee80211_sta *sta,
1294 struct ieee80211_key_conf *keyconf,
1295 bool mcast)
1296 {
1297 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1298 int ret;
1299 const u8 *addr;
1300 struct ieee80211_key_seq seq;
1301 u16 p1k[5];
1302
1303 switch (keyconf->cipher) {
1304 case WLAN_CIPHER_SUITE_TKIP:
1305 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
1306 /* get phase 1 key from mac80211 */
1307 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1308 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1309 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1310 seq.tkip.iv32, p1k, 0);
1311 break;
1312 case WLAN_CIPHER_SUITE_CCMP:
1313 case WLAN_CIPHER_SUITE_WEP40:
1314 case WLAN_CIPHER_SUITE_WEP104:
1315 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1316 0, NULL, 0);
1317 break;
1318 default:
1319 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1320 0, NULL, 0);
1321 }
1322
1323 return ret;
1324 }
1325
1326 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
1327 struct ieee80211_key_conf *keyconf,
1328 bool mcast)
1329 {
1330 struct iwl_mvm_add_sta_key_cmd cmd = {};
1331 __le16 key_flags;
1332 int ret;
1333 u32 status;
1334
1335 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1336 STA_KEY_FLG_KEYID_MSK);
1337 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
1338 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
1339
1340 if (mcast)
1341 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1342
1343 cmd.key_flags = key_flags;
1344 cmd.key_offset = keyconf->hw_key_idx;
1345 cmd.sta_id = sta_id;
1346
1347 status = ADD_STA_SUCCESS;
1348 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1349 &cmd, &status);
1350
1351 switch (status) {
1352 case ADD_STA_SUCCESS:
1353 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
1354 break;
1355 default:
1356 ret = -EIO;
1357 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
1358 break;
1359 }
1360
1361 return ret;
1362 }
1363
1364 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1365 struct ieee80211_vif *vif,
1366 struct ieee80211_sta *sta,
1367 struct ieee80211_key_conf *keyconf,
1368 bool have_key_offset)
1369 {
1370 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1371 u8 sta_id;
1372 int ret;
1373
1374 lockdep_assert_held(&mvm->mutex);
1375
1376 /* Get the station id from the mvm local station table */
1377 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1378 if (sta_id == IWL_MVM_STATION_COUNT) {
1379 IWL_ERR(mvm, "Failed to find station id\n");
1380 return -EINVAL;
1381 }
1382
1383 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
1384 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
1385 goto end;
1386 }
1387
1388 /*
1389 * It is possible that the 'sta' parameter is NULL, and thus
1390 * there is a need to retrieve the sta from the local station table.
1391 */
1392 if (!sta) {
1393 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1394 lockdep_is_held(&mvm->mutex));
1395 if (IS_ERR_OR_NULL(sta)) {
1396 IWL_ERR(mvm, "Invalid station id\n");
1397 return -EINVAL;
1398 }
1399 }
1400
1401 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1402 return -EINVAL;
1403
1404 if (!have_key_offset) {
1405 /*
1406 * The D3 firmware hardcodes the PTK offset to 0, so we have to
1407 * configure it there. As a result, this workaround exists to
1408 * let the caller set the key offset (hw_key_idx), see d3.c.
1409 */
1410 keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
1411 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
1412 return -ENOSPC;
1413 }
1414
1415 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
1416 if (ret) {
1417 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1418 goto end;
1419 }
1420
1421 /*
1422 * For WEP, the same key is used for multicast and unicast. Upload it
1423 * again, using the same key offset, and now pointing the other one
1424 * to the same key slot (offset).
1425 * If this fails, remove the original as well.
1426 */
1427 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1428 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
1429 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
1430 if (ret) {
1431 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1432 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1433 }
1434 }
1435
1436 end:
1437 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1438 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1439 sta->addr, ret);
1440 return ret;
1441 }
1442
1443 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1444 struct ieee80211_vif *vif,
1445 struct ieee80211_sta *sta,
1446 struct ieee80211_key_conf *keyconf)
1447 {
1448 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1449 u8 sta_id;
1450 int ret;
1451
1452 lockdep_assert_held(&mvm->mutex);
1453
1454 /* Get the station id from the mvm local station table */
1455 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1456
1457 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
1458 keyconf->keyidx, sta_id);
1459
1460 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
1461 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
1462
1463 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
1464 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
1465 keyconf->hw_key_idx);
1466 return -ENOENT;
1467 }
1468
1469 if (sta_id == IWL_MVM_STATION_COUNT) {
1470 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
1471 return 0;
1472 }
1473
1474 /*
1475 * It is possible that the 'sta' parameter is NULL, and thus
1476 * there is a need to retrieve the sta from the local station table,
1477 * for example when a GTK is removed (where the sta_id will then be
1478 * the AP ID, and no station was passed by mac80211.)
1479 */
1480 if (!sta) {
1481 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1482 lockdep_is_held(&mvm->mutex));
1483 if (!sta) {
1484 IWL_ERR(mvm, "Invalid station id\n");
1485 return -EINVAL;
1486 }
1487 }
1488
1489 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1490 return -EINVAL;
1491
1492 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1493 if (ret)
1494 return ret;
1495
1496 /* delete WEP key twice to get rid of (now useless) offset */
1497 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1498 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
1499 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
1500
1501 return ret;
1502 }
1503
1504 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1505 struct ieee80211_vif *vif,
1506 struct ieee80211_key_conf *keyconf,
1507 struct ieee80211_sta *sta, u32 iv32,
1508 u16 *phase1key)
1509 {
1510 struct iwl_mvm_sta *mvm_sta;
1511 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1512 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1513
1514 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
1515 return;
1516
1517 rcu_read_lock();
1518
1519 if (!sta) {
1520 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1521 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
1522 rcu_read_unlock();
1523 return;
1524 }
1525 }
1526
1527 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1528 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1529 iv32, phase1key, CMD_ASYNC);
1530 rcu_read_unlock();
1531 }
1532
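/* Tell the firmware that the station is awake (clear STA_FLG_PS). */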
1533 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1534 struct ieee80211_sta *sta)
1535 {
1536 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1537 struct iwl_mvm_add_sta_cmd cmd = {
1538 .add_modify = STA_MODE_MODIFY,
1539 .sta_id = mvmsta->sta_id,
1540 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
1541 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1542 };
1543 int ret;
1544
1545 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1546 if (ret)
1547 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1548 }
1549
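/*
 * Allow the firmware to release up to 'cnt' frames to a sleeping station, as
 * part of a PS-Poll response or a U-APSD service period.
 */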
1550 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
1551 struct ieee80211_sta *sta,
1552 enum ieee80211_frame_release_type reason,
1553 u16 cnt, u16 tids, bool more_data,
1554 bool agg)
1555 {
1556 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1557 struct iwl_mvm_add_sta_cmd cmd = {
1558 .add_modify = STA_MODE_MODIFY,
1559 .sta_id = mvmsta->sta_id,
1560 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
1561 .sleep_tx_count = cpu_to_le16(cnt),
1562 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1563 };
1564 int tid, ret;
1565 unsigned long _tids = tids;
1566
1567 /* convert TIDs to ACs - we don't support TSPEC so that's OK
1568 * Note that this field is reserved and unused by firmware not
1569 * supporting GO uAPSD, so it's safe to always do this.
1570 */
1571 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
1572 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
1573
1574 /* If we're releasing frames from aggregation queues then check if all
1575 * the queues combined that we're releasing frames from have
1576 * - more frames than the service period, in which case more_data
1577 * needs to be set
1578 * - fewer than 'cnt' frames, in which case we need to adjust the
1579 * firmware command (but do that unconditionally)
1580 */
1581 if (agg) {
1582 int remaining = cnt;
1583
1584 spin_lock_bh(&mvmsta->lock);
1585 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
1586 struct iwl_mvm_tid_data *tid_data;
1587 u16 n_queued;
1588
1589 tid_data = &mvmsta->tid_data[tid];
1590 if (WARN(tid_data->state != IWL_AGG_ON &&
1591 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
1592 "TID %d state is %d\n",
1593 tid, tid_data->state)) {
1594 spin_unlock_bh(&mvmsta->lock);
1595 ieee80211_sta_eosp(sta);
1596 return;
1597 }
1598
1599 n_queued = iwl_mvm_tid_queued(tid_data);
1600 if (n_queued > remaining) {
1601 more_data = true;
1602 remaining = 0;
1603 break;
1604 }
1605 remaining -= n_queued;
1606 }
1607 spin_unlock_bh(&mvmsta->lock);
1608
1609 cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
1610 if (WARN_ON(cnt - remaining == 0)) {
1611 ieee80211_sta_eosp(sta);
1612 return;
1613 }
1614 }
1615
1616 /* Note: this is ignored by firmware not supporting GO uAPSD */
1617 if (more_data)
1618 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
1619
1620 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
1621 mvmsta->next_status_eosp = true;
1622 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
1623 } else {
1624 cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
1625 }
1626
1627 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1628 if (ret)
1629 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1630 }
1631
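/* Forward the firmware's end-of-service-period notification to mac80211. */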
1632 int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
1633 struct iwl_rx_cmd_buffer *rxb,
1634 struct iwl_device_cmd *cmd)
1635 {
1636 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1637 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
1638 struct ieee80211_sta *sta;
1639 u32 sta_id = le32_to_cpu(notif->sta_id);
1640
1641 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
1642 return 0;
1643
1644 rcu_read_lock();
1645 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1646 if (!IS_ERR_OR_NULL(sta))
1647 ieee80211_sta_eosp(sta);
1648 rcu_read_unlock();
1649
1650 return 0;
1651 }
1652
1653 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
1654 struct iwl_mvm_sta *mvmsta, bool disable)
1655 {
1656 struct iwl_mvm_add_sta_cmd cmd = {
1657 .add_modify = STA_MODE_MODIFY,
1658 .sta_id = mvmsta->sta_id,
1659 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
1660 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
1661 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1662 };
1663 int ret;
1664
1665 if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
1666 return;
1667
1668 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1669 if (ret)
1670 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1671 }
1672
1673 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
1674 struct ieee80211_sta *sta,
1675 bool disable)
1676 {
1677 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1678
1679 spin_lock_bh(&mvm_sta->lock);
1680
1681 if (mvm_sta->disable_tx == disable) {
1682 spin_unlock_bh(&mvm_sta->lock);
1683 return;
1684 }
1685
1686 mvm_sta->disable_tx = disable;
1687
1688 /*
1689 * Tell mac80211 to start/stop queueing tx for this station,
1690 * but don't stop queueing if there are still pending frames
1691 * for this station.
1692 */
1693 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
1694 ieee80211_sta_block_awake(mvm->hw, sta, disable);
1695
1696 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
1697
1698 spin_unlock_bh(&mvm_sta->lock);
1699 }
1700
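/*
 * Block or unblock TX for all the stations that belong to the given vif,
 * matched by the firmware MAC id and color.
 */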
1701 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
1702 struct iwl_mvm_vif *mvmvif,
1703 bool disable)
1704 {
1705 struct ieee80211_sta *sta;
1706 struct iwl_mvm_sta *mvm_sta;
1707 int i;
1708
1709 lockdep_assert_held(&mvm->mutex);
1710
1711 /* Block/unblock all the stations of the given mvmvif */
1712 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
1713 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
1714 lockdep_is_held(&mvm->mutex));
1715 if (IS_ERR_OR_NULL(sta))
1716 continue;
1717
1718 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1719 if (mvm_sta->mac_id_n_color !=
1720 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
1721 continue;
1722
1723 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
1724 }
1725 }