drivers/net/wireless/intel/iwlwifi/mvm/sta.c
8ca151b5
JB
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
fa7878e7
AO
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
26d6c16b 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
8ca151b5
JB
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
410dc5aa 27 * in the file called COPYING.
8ca151b5
JB
28 *
29 * Contact Information:
cb2f8277 30 * Intel Linux Wireless <linuxwifi@intel.com>
8ca151b5
JB
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
fa7878e7
AO
35 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
26d6c16b 37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
8ca151b5
JB
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "sta.h"
9ee718aa 71#include "rs.h"
8ca151b5 72
854c5705
SS
73/*
74 * A newer version of the ADD_STA command added new fields at the end of the
75 * structure, so sending the size of the relevant API's structure is enough to
76 * support both API versions.
77 */
78static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
79{
ced19f26
SS
80 if (iwl_mvm_has_new_rx_api(mvm) ||
81 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
82 return sizeof(struct iwl_mvm_add_sta_cmd);
83 else
84 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
854c5705
SS
85}
86
b92e661b
EP
87static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
88 enum nl80211_iftype iftype)
8ca151b5
JB
89{
90 int sta_id;
b92e661b 91 u32 reserved_ids = 0;
8ca151b5 92
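 /* reserved_ids below is a u32 bitmap, so only 32 station IDs fit */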
b92e661b 93 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
8ca151b5
JB
94 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
95
96 lockdep_assert_held(&mvm->mutex);
97
b92e661b
EP
98 /* d0i3/d3 assume the AP's sta_id (of the station vif) is 0. Reserve it. */
99 if (iftype != NL80211_IFTYPE_STATION)
100 reserved_ids = BIT(0);
101
8ca151b5 102 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
0ae98812 103 for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
b92e661b
EP
104 if (BIT(sta_id) & reserved_ids)
105 continue;
106
8ca151b5
JB
107 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
108 lockdep_is_held(&mvm->mutex)))
109 return sta_id;
b92e661b 110 }
0ae98812 111 return IWL_MVM_INVALID_STA;
8ca151b5
JB
112}
113
7a453973
JB
114/* send station add/update command to firmware */
115int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
24afba76 116 bool update, unsigned int flags)
8ca151b5 117{
9d8ce6af 118 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4b8265ab
EG
119 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
120 .sta_id = mvm_sta->sta_id,
121 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
122 .add_modify = update ? 1 : 0,
123 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
8addabf8
NG
124 STA_FLG_MIMO_EN_MSK |
125 STA_FLG_RTS_MIMO_PROT),
cf0cda19 126 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
4b8265ab 127 };
8ca151b5
JB
128 int ret;
129 u32 status;
130 u32 agg_size = 0, mpdu_dens = 0;
131
ced19f26
SS
132 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
133 add_sta_cmd.station_type = mvm_sta->sta_type;
134
24afba76 135 if (!update || (flags & STA_MODIFY_QUEUES)) {
7a453973 136 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
24afba76 137
bb49701b
SS
138 if (!iwl_mvm_has_new_tx_api(mvm)) {
139 add_sta_cmd.tfd_queue_msk =
140 cpu_to_le32(mvm_sta->tfd_queue_msk);
141
142 if (flags & STA_MODIFY_QUEUES)
143 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
144 } else {
145 WARN_ON(flags & STA_MODIFY_QUEUES);
146 }
7a453973 147 }
5bc5aaad
JB
148
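 /*
  * Each case below deliberately falls through: a wider bandwidth also
  * enables all the narrower FAT (channel-bonding) flags under it.
  */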
149 switch (sta->bandwidth) {
150 case IEEE80211_STA_RX_BW_160:
151 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
152 /* fall through */
153 case IEEE80211_STA_RX_BW_80:
154 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
155 /* fall through */
156 case IEEE80211_STA_RX_BW_40:
157 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
158 /* fall through */
159 case IEEE80211_STA_RX_BW_20:
160 if (sta->ht_cap.ht_supported)
161 add_sta_cmd.station_flags |=
162 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
163 break;
164 }
165
166 switch (sta->rx_nss) {
167 case 1:
168 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
169 break;
170 case 2:
171 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
172 break;
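 /* anything above two spatial streams is reported as MIMO3, the
  * highest setting the station flags encode
  */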
173 case 3 ... 8:
174 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
175 break;
176 }
177
178 switch (sta->smps_mode) {
179 case IEEE80211_SMPS_AUTOMATIC:
180 case IEEE80211_SMPS_NUM_MODES:
181 WARN_ON(1);
182 break;
183 case IEEE80211_SMPS_STATIC:
184 /* override NSS */
185 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
186 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
187 break;
188 case IEEE80211_SMPS_DYNAMIC:
189 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
190 break;
191 case IEEE80211_SMPS_OFF:
192 /* nothing */
193 break;
194 }
8ca151b5
JB
195
196 if (sta->ht_cap.ht_supported) {
197 add_sta_cmd.station_flags_msk |=
198 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
199 STA_FLG_AGG_MPDU_DENS_MSK);
200
201 mpdu_dens = sta->ht_cap.ampdu_density;
202 }
203
204 if (sta->vht_cap.vht_supported) {
205 agg_size = sta->vht_cap.cap &
206 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
207 agg_size >>=
208 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
209 } else if (sta->ht_cap.ht_supported) {
210 agg_size = sta->ht_cap.ampdu_factor;
211 }
212
213 add_sta_cmd.station_flags |=
214 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
215 add_sta_cmd.station_flags |=
216 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
6ea29ce5
JB
217 if (mvm_sta->associated)
218 add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
8ca151b5 219
65e25482
JB
220 if (sta->wme) {
221 add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
222
223 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
c80eb570 224 add_sta_cmd.uapsd_acs |= BIT(AC_BK);
65e25482 225 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
c80eb570 226 add_sta_cmd.uapsd_acs |= BIT(AC_BE);
65e25482 227 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
c80eb570 228 add_sta_cmd.uapsd_acs |= BIT(AC_VI);
65e25482 229 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
c80eb570
EG
230 add_sta_cmd.uapsd_acs |= BIT(AC_VO);
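 /* the low nibble holds the trigger-enabled ACs; copy it into the
  * high nibble so the same ACs are also delivery-enabled
  */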
231 add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
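 /* mac80211's max_sp counts pairs of frames, and 0 means "all buffered
  * frames", which the command encodes as an SP length of 128
  */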
e71ca5ea 232 add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
65e25482
JB
233 }
234
8ca151b5 235 status = ADD_STA_SUCCESS;
854c5705
SS
236 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
237 iwl_mvm_add_sta_cmd_size(mvm),
f9dc0004 238 &add_sta_cmd, &status);
8ca151b5
JB
239 if (ret)
240 return ret;
241
837c4da9 242 switch (status & IWL_ADD_STA_STATUS_MASK) {
8ca151b5
JB
243 case ADD_STA_SUCCESS:
244 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
245 break;
246 default:
247 ret = -EIO;
248 IWL_ERR(mvm, "ADD_STA failed\n");
249 break;
250 }
251
252 return ret;
253}
254
10b2b201
SS
255static void iwl_mvm_rx_agg_session_expired(unsigned long data)
256{
257 struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
258 struct iwl_mvm_baid_data *ba_data;
259 struct ieee80211_sta *sta;
260 struct iwl_mvm_sta *mvm_sta;
261 unsigned long timeout;
262
263 rcu_read_lock();
264
265 ba_data = rcu_dereference(*rcu_ptr);
266
267 if (WARN_ON(!ba_data))
268 goto unlock;
269
270 if (!ba_data->timeout)
271 goto unlock;
272
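 /* Tear the session down only once twice the BA timeout has passed with
  * no RX; if a frame arrived recently, just re-arm the timer.
  */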
273 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
274 if (time_is_after_jiffies(timeout)) {
275 mod_timer(&ba_data->session_timer, timeout);
276 goto unlock;
277 }
278
279 /* Timer expired */
280 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
61dd8a8a
EG
281
282 /*
283 * sta should be valid unless the following happens:
284 * The firmware asserts, which triggers a reconfig flow, but
285 * the reconfig fails before we set the sta pointer in the
286 * fw_id_to_mac_id pointer table. mac80211 can't stop the
287 * A-MPDU session, so the timer continues to run. Then the
288 * timer expires and sta is NULL.
289 */
290 if (!sta)
291 goto unlock;
292
10b2b201 293 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
20fc690f
NG
294 ieee80211_rx_ba_timer_expired(mvm_sta->vif,
295 sta->addr, ba_data->tid);
10b2b201
SS
296unlock:
297 rcu_read_unlock();
298}
299
9794c64f
LK
300/* Disable aggregations for a bitmap of TIDs for a given station */
301static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
302 unsigned long disable_agg_tids,
303 bool remove_queue)
304{
305 struct iwl_mvm_add_sta_cmd cmd = {};
306 struct ieee80211_sta *sta;
307 struct iwl_mvm_sta *mvmsta;
308 u32 status;
309 u8 sta_id;
310 int ret;
311
bb49701b
SS
312 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
313 return -EINVAL;
314
9794c64f
LK
315 spin_lock_bh(&mvm->queue_info_lock);
316 sta_id = mvm->queue_info[queue].ra_sta_id;
317 spin_unlock_bh(&mvm->queue_info_lock);
318
319 rcu_read_lock();
320
321 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
322
323 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
324 rcu_read_unlock();
325 return -EINVAL;
326 }
327
328 mvmsta = iwl_mvm_sta_from_mac80211(sta);
329
330 mvmsta->tid_disable_agg |= disable_agg_tids;
331
332 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
333 cmd.sta_id = mvmsta->sta_id;
334 cmd.add_modify = STA_MODE_MODIFY;
335 cmd.modify_mask = STA_MODIFY_QUEUES;
336 if (disable_agg_tids)
337 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
338 if (remove_queue)
339 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
340 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
341 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
342
343 rcu_read_unlock();
344
345 /* Notify FW of queue removal from the STA queues */
346 status = ADD_STA_SUCCESS;
347 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
348 iwl_mvm_add_sta_cmd_size(mvm),
349 &cmd, &status);
350
351 return ret;
352}
353
42db09c1
LK
354static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
355{
356 struct ieee80211_sta *sta;
357 struct iwl_mvm_sta *mvmsta;
358 unsigned long tid_bitmap;
359 unsigned long agg_tids = 0;
806911da 360 u8 sta_id;
42db09c1
LK
361 int tid;
362
363 lockdep_assert_held(&mvm->mutex);
364
bb49701b
SS
365 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
366 return -EINVAL;
367
42db09c1
LK
368 spin_lock_bh(&mvm->queue_info_lock);
369 sta_id = mvm->queue_info[queue].ra_sta_id;
370 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
371 spin_unlock_bh(&mvm->queue_info_lock);
372
373 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
374 lockdep_is_held(&mvm->mutex));
375
376 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
377 return -EINVAL;
378
379 mvmsta = iwl_mvm_sta_from_mac80211(sta);
380
381 spin_lock_bh(&mvmsta->lock);
382 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
383 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
384 agg_tids |= BIT(tid);
385 }
386 spin_unlock_bh(&mvmsta->lock);
387
388 return agg_tids;
389}
390
9794c64f
LK
391/*
392 * Remove a queue from a station's resources.
393 * Note that this only marks the queue as free. It DOESN'T delete a BA
394 * agreement, and doesn't disable the queue.
395 */
396static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
397{
398 struct ieee80211_sta *sta;
399 struct iwl_mvm_sta *mvmsta;
400 unsigned long tid_bitmap;
401 unsigned long disable_agg_tids = 0;
402 u8 sta_id;
403 int tid;
404
405 lockdep_assert_held(&mvm->mutex);
406
bb49701b
SS
407 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
408 return -EINVAL;
409
9794c64f
LK
410 spin_lock_bh(&mvm->queue_info_lock);
411 sta_id = mvm->queue_info[queue].ra_sta_id;
412 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
413 spin_unlock_bh(&mvm->queue_info_lock);
414
415 rcu_read_lock();
416
417 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
418
419 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
420 rcu_read_unlock();
421 return 0;
422 }
423
424 mvmsta = iwl_mvm_sta_from_mac80211(sta);
425
426 spin_lock_bh(&mvmsta->lock);
42db09c1 427 /* Unmap MAC queues and TIDs from this queue */
9794c64f 428 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
9794c64f
LK
429 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
430 disable_agg_tids |= BIT(tid);
6862fcee 431 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
9794c64f 432 }
9794c64f 433
42db09c1 434 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
9794c64f
LK
435 spin_unlock_bh(&mvmsta->lock);
436
437 rcu_read_unlock();
438
9794c64f
LK
439 return disable_agg_tids;
440}
441
01796ff2
SS
442static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
443 bool same_sta)
444{
445 struct iwl_mvm_sta *mvmsta;
446 u8 txq_curr_ac, sta_id, tid;
447 unsigned long disable_agg_tids = 0;
448 int ret;
449
450 lockdep_assert_held(&mvm->mutex);
451
bb49701b
SS
452 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
453 return -EINVAL;
454
01796ff2
SS
455 spin_lock_bh(&mvm->queue_info_lock);
456 txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
457 sta_id = mvm->queue_info[queue].ra_sta_id;
458 tid = mvm->queue_info[queue].txq_tid;
459 spin_unlock_bh(&mvm->queue_info_lock);
460
461 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
e3df1e4b
SD
462 if (WARN_ON(!mvmsta))
463 return -EINVAL;
01796ff2
SS
464
465 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
466 /* Disable the queue */
467 if (disable_agg_tids)
468 iwl_mvm_invalidate_sta_queue(mvm, queue,
469 disable_agg_tids, false);
470
471 ret = iwl_mvm_disable_txq(mvm, queue,
472 mvmsta->vif->hw_queue[txq_curr_ac],
473 tid, 0);
474 if (ret) {
475 /* Re-mark the inactive queue as inactive */
476 spin_lock_bh(&mvm->queue_info_lock);
477 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
478 spin_unlock_bh(&mvm->queue_info_lock);
479 IWL_ERR(mvm,
480 "Failed to free inactive queue %d (ret=%d)\n",
481 queue, ret);
482
483 return ret;
484 }
485
486 /* If TXQ is allocated to another STA, update removal in FW */
487 if (!same_sta)
488 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
489
490 return 0;
491}
492
42db09c1
LK
493static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
494 unsigned long tfd_queue_mask, u8 ac)
495{
496 int queue = 0;
497 u8 ac_to_queue[IEEE80211_NUM_ACS];
498 int i;
499
500 lockdep_assert_held(&mvm->queue_info_lock);
bb49701b
SS
501 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
502 return -EINVAL;
42db09c1
LK
503
504 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
505
506 /* See what ACs the existing queues for this STA have */
507 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
508 /* Only DATA queues can be shared */
509 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
510 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
511 continue;
512
9f9af3d7
LK
513 /* Don't try and take queues being reconfigured */
514 if (mvm->queue_info[i].status ==
515 IWL_MVM_QUEUE_RECONFIGURING)
516 continue;
517
42db09c1
LK
518 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
519 }
520
521 /*
522 * The queue to share is chosen only from DATA queues as follows (in
523 * descending priority):
524 * 1. An AC_BE queue
525 * 2. Same AC queue
526 * 3. Highest AC queue that is lower than new AC
527 * 4. Any existing AC (there always is at least 1 DATA queue)
528 */
529
530 /* Priority 1: An AC_BE queue */
531 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
532 queue = ac_to_queue[IEEE80211_AC_BE];
533 /* Priority 2: Same AC queue */
534 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
535 queue = ac_to_queue[ac];
536 /* Priority 3a: If new AC is VO and VI exists - use VI */
537 else if (ac == IEEE80211_AC_VO &&
538 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
539 queue = ac_to_queue[IEEE80211_AC_VI];
540 /* Priority 3b: No BE so only AC less than the new one is BK */
541 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
542 queue = ac_to_queue[IEEE80211_AC_BK];
543 /* Priority 4a: No BE nor BK - use VI if exists */
544 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
545 queue = ac_to_queue[IEEE80211_AC_VI];
546 /* Priority 4b: No BE, BK nor VI - use VO if exists */
547 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
548 queue = ac_to_queue[IEEE80211_AC_VO];
549
550 /* Make sure queue found (or not) is legal */
9f9af3d7
LK
551 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
552 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
553 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
42db09c1 554 IWL_ERR(mvm, "No DATA queues available to share\n");
9f9af3d7
LK
555 return -ENOSPC;
556 }
557
558 /* Make sure the queue isn't in the middle of being reconfigured */
559 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
560 IWL_ERR(mvm,
561 "TXQ %d is in the middle of re-config - try again\n",
562 queue);
563 return -EBUSY;
42db09c1
LK
564 }
565
566 return queue;
567}
568
58f2cc57 569/*
9f9af3d7
LK
570 * If a given queue has a higher AC than the TID stream that is being compared
571 * to, the queue needs to be redirected to the lower AC. This function does that
58f2cc57
LK
572 * in such a case; otherwise - if no redirection is required - it does nothing,
573 * unless the %force param is true.
574 */
9f9af3d7
LK
575int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
576 int ac, int ssn, unsigned int wdg_timeout,
577 bool force)
58f2cc57
LK
578{
579 struct iwl_scd_txq_cfg_cmd cmd = {
580 .scd_queue = queue,
f7c692de 581 .action = SCD_CFG_DISABLE_QUEUE,
58f2cc57
LK
582 };
583 bool shared_queue;
584 unsigned long mq;
585 int ret;
586
bb49701b
SS
587 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
588 return -EINVAL;
589
58f2cc57
LK
590 /*
591 * If the AC is lower than the current one - the FIFO needs to be
592 * redirected to the lowest one of the streams in the queue. Check if
593 * this is needed here.
594 * Notice that the enum ieee80211_ac_numbers is "flipped": BK has value
595 * 3 and VO has value 0, so to check if AC X is lower than AC Y we need
596 * to check if the numerical value of X is LARGER than that of Y.
597 */
598 spin_lock_bh(&mvm->queue_info_lock);
599 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
600 spin_unlock_bh(&mvm->queue_info_lock);
601
602 IWL_DEBUG_TX_QUEUES(mvm,
603 "No redirection needed on TXQ #%d\n",
604 queue);
605 return 0;
606 }
607
608 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
609 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
edbe961c 610 cmd.tid = mvm->queue_info[queue].txq_tid;
34e10860 611 mq = mvm->hw_queue_to_mac80211[queue];
58f2cc57
LK
612 shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
613 spin_unlock_bh(&mvm->queue_info_lock);
614
9f9af3d7 615 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
58f2cc57
LK
616 queue, iwl_mvm_ac_to_tx_fifo[ac]);
617
618 /* Stop MAC queues and wait for this queue to empty */
619 iwl_mvm_stop_mac_queues(mvm, mq);
a1a57877 620 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
58f2cc57
LK
621 if (ret) {
622 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
623 queue);
624 ret = -EIO;
625 goto out;
626 }
627
628 /* Before redirecting the queue we need to de-activate it */
629 iwl_trans_txq_disable(mvm->trans, queue, false);
630 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
631 if (ret)
632 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
633 ret);
634
635 /* Make sure the SCD wrptr is correctly set before reconfiguring */
ca3b9c6b 636 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
58f2cc57 637
edbe961c
LK
638 /* Update the TID "owner" of the queue */
639 spin_lock_bh(&mvm->queue_info_lock);
640 mvm->queue_info[queue].txq_tid = tid;
641 spin_unlock_bh(&mvm->queue_info_lock);
642
58f2cc57
LK
643 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
644
645 /* Redirect to lower AC */
646 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
647 cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
648 ssn);
649
650 /* Update AC marking of the queue */
651 spin_lock_bh(&mvm->queue_info_lock);
652 mvm->queue_info[queue].mac80211_ac = ac;
653 spin_unlock_bh(&mvm->queue_info_lock);
654
655 /*
656 * Mark the queue as shared in the transport if it is shared.
657 * Note this has to be done after queue enablement because enablement
658 * can also set this value, and there is no indication of shared
659 * queues there.
660 */
661 if (shared_queue)
662 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
663
664out:
665 /* Continue using the MAC queues */
666 iwl_mvm_start_mac_queues(mvm, mq);
667
668 return ret;
669}
670
310181ec
SS
671static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
672 struct ieee80211_sta *sta, u8 ac,
673 int tid)
674{
675 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
676 unsigned int wdg_timeout =
677 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
678 u8 mac_queue = mvmsta->vif->hw_queue[ac];
679 int queue = -1;
680
681 lockdep_assert_held(&mvm->mutex);
682
683 IWL_DEBUG_TX_QUEUES(mvm,
684 "Allocating queue for sta %d on tid %d\n",
685 mvmsta->sta_id, tid);
686 queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
687 wdg_timeout);
688 if (queue < 0)
689 return queue;
690
691 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
692
693 spin_lock_bh(&mvmsta->lock);
694 mvmsta->tid_data[tid].txq_id = queue;
695 mvmsta->tid_data[tid].is_tid_active = true;
310181ec
SS
696 spin_unlock_bh(&mvmsta->lock);
697
310181ec
SS
698 return 0;
699}
700
24afba76
LK
701static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
702 struct ieee80211_sta *sta, u8 ac, int tid,
703 struct ieee80211_hdr *hdr)
704{
705 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
706 struct iwl_trans_txq_scd_cfg cfg = {
cf6c6ea3 707 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
24afba76
LK
708 .sta_id = mvmsta->sta_id,
709 .tid = tid,
710 .frame_limit = IWL_FRAME_LIMIT,
711 };
712 unsigned int wdg_timeout =
713 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
714 u8 mac_queue = mvmsta->vif->hw_queue[ac];
715 int queue = -1;
01796ff2 716 bool using_inactive_queue = false, same_sta = false;
9794c64f
LK
717 unsigned long disable_agg_tids = 0;
718 enum iwl_mvm_agg_state queue_state;
dcfbd67b 719 bool shared_queue = false, inc_ssn;
24afba76 720 int ssn;
42db09c1 721 unsigned long tfd_queue_mask;
cf961e16 722 int ret;
24afba76
LK
723
724 lockdep_assert_held(&mvm->mutex);
725
310181ec
SS
726 if (iwl_mvm_has_new_tx_api(mvm))
727 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
728
42db09c1
LK
729 spin_lock_bh(&mvmsta->lock);
730 tfd_queue_mask = mvmsta->tfd_queue_msk;
731 spin_unlock_bh(&mvmsta->lock);
732
d2515a99 733 spin_lock_bh(&mvm->queue_info_lock);
24afba76
LK
734
735 /*
736 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
737 * exists
738 */
739 if (!ieee80211_is_data_qos(hdr->frame_control) ||
740 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
9794c64f
LK
741 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
742 IWL_MVM_DQA_MIN_MGMT_QUEUE,
24afba76
LK
743 IWL_MVM_DQA_MAX_MGMT_QUEUE);
744 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
745 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
746 queue);
747
748 /* If no such queue is found, we'll use a DATA queue instead */
749 }
750
9794c64f
LK
751 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
752 (mvm->queue_info[mvmsta->reserved_queue].status ==
753 IWL_MVM_QUEUE_RESERVED ||
754 mvm->queue_info[mvmsta->reserved_queue].status ==
755 IWL_MVM_QUEUE_INACTIVE)) {
24afba76 756 queue = mvmsta->reserved_queue;
9794c64f 757 mvm->queue_info[queue].reserved = true;
24afba76
LK
758 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
759 }
760
761 if (queue < 0)
9794c64f
LK
762 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
763 IWL_MVM_DQA_MIN_DATA_QUEUE,
24afba76 764 IWL_MVM_DQA_MAX_DATA_QUEUE);
cf961e16 765
9794c64f
LK
766 /*
767 * Check if this queue is already allocated but inactive.
768 * In such a case, we'll need to first free this queue before enabling
769 * it again, so we'll mark it as reserved to make sure no new traffic
770 * arrives on it
771 */
772 if (queue > 0 &&
773 mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
774 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
775 using_inactive_queue = true;
01796ff2 776 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
9794c64f
LK
777 IWL_DEBUG_TX_QUEUES(mvm,
778 "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
779 queue, mvmsta->sta_id, tid);
780 }
781
42db09c1
LK
782 /* No free queue - we'll have to share */
783 if (queue <= 0) {
784 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
785 if (queue > 0) {
786 shared_queue = true;
787 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
788 }
789 }
790
cf961e16
LK
791 /*
792 * Mark TXQ as ready, even though it hasn't been fully configured yet,
793 * to make sure no one else takes it.
794 * This will allow avoiding re-acquiring the lock at the end of the
795 * configuration. On error we'll mark it back as free.
796 */
42db09c1 797 if ((queue > 0) && !shared_queue)
cf961e16 798 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
24afba76 799
d2515a99 800 spin_unlock_bh(&mvm->queue_info_lock);
24afba76 801
42db09c1
LK
802 /* This shouldn't happen - out of queues */
803 if (WARN_ON(queue <= 0)) {
804 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
805 tid, cfg.sta_id);
9f9af3d7 806 return queue;
42db09c1 807 }
24afba76
LK
808
809 /*
810 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
811 * but for configuring the SCD to send A-MPDUs we need to mark the queue
812 * as aggregatable.
813 * Mark all DATA queues as eligible for aggregation at some point
814 */
d5216a28
LK
815 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
816 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
24afba76 817
9794c64f
LK
818 /*
819 * If this queue was previously inactive (idle) - we need to free it
820 * first
821 */
822 if (using_inactive_queue) {
01796ff2
SS
823 ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
824 if (ret)
9794c64f 825 return ret;
9794c64f
LK
826 }
827
42db09c1
LK
828 IWL_DEBUG_TX_QUEUES(mvm,
829 "Allocating %squeue #%d to sta %d on tid %d\n",
830 shared_queue ? "shared " : "", queue,
831 mvmsta->sta_id, tid);
832
833 if (shared_queue) {
834 /* Disable any open aggs on this queue */
835 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
836
837 if (disable_agg_tids) {
838 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
839 queue);
840 iwl_mvm_invalidate_sta_queue(mvm, queue,
841 disable_agg_tids, false);
842 }
42db09c1 843 }
24afba76
LK
844
845 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
dcfbd67b
EG
846 inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
847 ssn, &cfg, wdg_timeout);
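 /* The queue was enabled at ssn + 1, so advance the frame's sequence
  * number to match: bits 0-3 of seq_ctrl are the fragment number,
  * hence the 0x10 increment.
  */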
848 if (inc_ssn) {
849 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
850 le16_add_cpu(&hdr->seq_ctrl, 0x10);
851 }
24afba76 852
58f2cc57
LK
853 /*
854 * Mark the queue as shared in the transport if it is shared.
855 * Note this has to be done after queue enablement because enablement
856 * can also set this value, and there is no indication of shared
857 * queues there.
858 */
859 if (shared_queue)
860 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
861
24afba76 862 spin_lock_bh(&mvmsta->lock);
dcfbd67b
EG
863 /*
864 * This looks racy, but it is not. We have only one packet for
865 * this ra/tid in our Tx path since we stop the Qdisc when we
866 * need to allocate a new TFD queue.
867 */
868 if (inc_ssn)
869 mvmsta->tid_data[tid].seq_number += 0x10;
24afba76 870 mvmsta->tid_data[tid].txq_id = queue;
9794c64f 871 mvmsta->tid_data[tid].is_tid_active = true;
24afba76 872 mvmsta->tfd_queue_msk |= BIT(queue);
9794c64f 873 queue_state = mvmsta->tid_data[tid].state;
24afba76
LK
874
875 if (mvmsta->reserved_queue == queue)
876 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
877 spin_unlock_bh(&mvmsta->lock);
878
42db09c1
LK
879 if (!shared_queue) {
880 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
881 if (ret)
882 goto out_err;
cf961e16 883
42db09c1
LK
884 /* If we need to re-enable aggregations... */
885 if (queue_state == IWL_AGG_ON) {
886 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
887 if (ret)
888 goto out_err;
889 }
58f2cc57
LK
890 } else {
891 /* Redirect queue, if needed */
892 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
893 wdg_timeout, false);
894 if (ret)
895 goto out_err;
42db09c1 896 }
9794c64f 897
42db09c1 898 return 0;
cf961e16
LK
899
900out_err:
901 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
902
903 return ret;
24afba76
LK
904}
905
19aefa45
LK
906static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
907{
908 struct iwl_scd_txq_cfg_cmd cmd = {
909 .scd_queue = queue,
910 .action = SCD_CFG_UPDATE_QUEUE_TID,
911 };
19aefa45
LK
912 int tid;
913 unsigned long tid_bitmap;
914 int ret;
915
916 lockdep_assert_held(&mvm->mutex);
917
bb49701b
SS
918 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
919 return;
920
19aefa45 921 spin_lock_bh(&mvm->queue_info_lock);
19aefa45
LK
922 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
923 spin_unlock_bh(&mvm->queue_info_lock);
924
925 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
926 return;
927
928 /* Find any TID for queue */
929 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
930 cmd.tid = tid;
931 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
932
933 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
341ca402 934 if (ret) {
19aefa45
LK
935 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
936 queue, ret);
341ca402
LK
937 return;
938 }
939
940 spin_lock_bh(&mvm->queue_info_lock);
941 mvm->queue_info[queue].txq_tid = tid;
942 spin_unlock_bh(&mvm->queue_info_lock);
943 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
944 queue, tid);
19aefa45
LK
945}
946
9f9af3d7
LK
947static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
948{
949 struct ieee80211_sta *sta;
950 struct iwl_mvm_sta *mvmsta;
806911da 951 u8 sta_id;
9f9af3d7
LK
952 int tid = -1;
953 unsigned long tid_bitmap;
954 unsigned int wdg_timeout;
955 int ssn;
956 int ret = true;
957
bb49701b
SS
958 /* queue sharing is disabled on new TX path */
959 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
960 return;
961
9f9af3d7
LK
962 lockdep_assert_held(&mvm->mutex);
963
964 spin_lock_bh(&mvm->queue_info_lock);
965 sta_id = mvm->queue_info[queue].ra_sta_id;
966 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
967 spin_unlock_bh(&mvm->queue_info_lock);
968
969 /* Find TID for queue, and make sure it is the only one on the queue */
970 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
971 if (tid_bitmap != BIT(tid)) {
972 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
973 queue, tid_bitmap);
974 return;
975 }
976
977 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
978 tid);
979
980 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
981 lockdep_is_held(&mvm->mutex));
982
983 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
984 return;
985
986 mvmsta = iwl_mvm_sta_from_mac80211(sta);
987 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
988
989 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
990
991 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
992 tid_to_mac80211_ac[tid], ssn,
993 wdg_timeout, true);
994 if (ret) {
995 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
996 return;
997 }
998
999 /* If aggs should be turned back on - do it */
1000 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
9cd70e80 1001 struct iwl_mvm_add_sta_cmd cmd = {0};
9f9af3d7
LK
1002
1003 mvmsta->tid_disable_agg &= ~BIT(tid);
1004
1005 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1006 cmd.sta_id = mvmsta->sta_id;
1007 cmd.add_modify = STA_MODE_MODIFY;
1008 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1009 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1010 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1011
1012 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1013 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1014 if (!ret) {
1015 IWL_DEBUG_TX_QUEUES(mvm,
1016 "TXQ #%d is now aggregated again\n",
1017 queue);
1018
1019 /* Mark queue internally as aggregating again */
1020 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1021 }
1022 }
1023
1024 spin_lock_bh(&mvm->queue_info_lock);
1025 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1026 spin_unlock_bh(&mvm->queue_info_lock);
1027}
1028
24afba76
LK
1029static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1030{
1031 if (tid == IWL_MAX_TID_COUNT)
1032 return IEEE80211_AC_VO; /* MGMT */
1033
1034 return tid_to_mac80211_ac[tid];
1035}
1036
1037static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1038 struct ieee80211_sta *sta, int tid)
1039{
1040 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1041 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1042 struct sk_buff *skb;
1043 struct ieee80211_hdr *hdr;
1044 struct sk_buff_head deferred_tx;
1045 u8 mac_queue;
1046 bool no_queue = false; /* Marks if there is a problem with the queue */
1047 u8 ac;
1048
1049 lockdep_assert_held(&mvm->mutex);
1050
1051 skb = skb_peek(&tid_data->deferred_tx_frames);
1052 if (!skb)
1053 return;
1054 hdr = (void *)skb->data;
1055
1056 ac = iwl_mvm_tid_to_ac_queue(tid);
1057 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1058
6862fcee 1059 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
24afba76
LK
1060 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1061 IWL_ERR(mvm,
1062 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1063 mvmsta->sta_id, tid);
1064
1065 /*
1066 * Mark the queue as problematic so the deferred traffic is
1067 * freed later, as we can do nothing with it
1068 */
1069 no_queue = true;
1070 }
1071
1072 __skb_queue_head_init(&deferred_tx);
1073
d2515a99
LK
1074 /* Disable bottom-halves when entering TX path */
1075 local_bh_disable();
24afba76
LK
1076 spin_lock(&mvmsta->lock);
1077 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
ad5de737 1078 mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
24afba76
LK
1079 spin_unlock(&mvmsta->lock);
1080
24afba76
LK
1081 while ((skb = __skb_dequeue(&deferred_tx)))
1082 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1083 ieee80211_free_txskb(mvm->hw, skb);
1084 local_bh_enable();
1085
1086 /* Wake queue */
1087 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1088}
1089
1090void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1091{
1092 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1093 add_stream_wk);
1094 struct ieee80211_sta *sta;
1095 struct iwl_mvm_sta *mvmsta;
1096 unsigned long deferred_tid_traffic;
9f9af3d7 1097 int queue, sta_id, tid;
24afba76 1098
9794c64f
LK
1099 /* Check inactivity of queues */
1100 iwl_mvm_inactivity_check(mvm);
1101
24afba76
LK
1102 mutex_lock(&mvm->mutex);
1103
34e10860
SS
1104 /* No queue reconfiguration in TVQM mode */
1105 if (iwl_mvm_has_new_tx_api(mvm))
1106 goto alloc_queues;
1107
9f9af3d7 1108 /* Reconfigure queues requiring reconfiguration */
34e10860 1109 for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
9f9af3d7 1110 bool reconfig;
19aefa45 1111 bool change_owner;
9f9af3d7
LK
1112
1113 spin_lock_bh(&mvm->queue_info_lock);
1114 reconfig = (mvm->queue_info[queue].status ==
1115 IWL_MVM_QUEUE_RECONFIGURING);
19aefa45
LK
1116
1117 /*
1118 * We need to take into account a situation in which a TXQ was
1119 * allocated to TID x, and then turned shared by adding TIDs y
1120 * and z. If TID x becomes inactive and is removed from the TXQ,
1121 * ownership must be given to one of the remaining TIDs.
1122 * This is mainly because if TID x continues - a new queue can't
1123 * be allocated for it as long as it is an owner of another TXQ.
1124 */
1125 change_owner = !(mvm->queue_info[queue].tid_bitmap &
1126 BIT(mvm->queue_info[queue].txq_tid)) &&
1127 (mvm->queue_info[queue].status ==
1128 IWL_MVM_QUEUE_SHARED);
9f9af3d7
LK
1129 spin_unlock_bh(&mvm->queue_info_lock);
1130
1131 if (reconfig)
1132 iwl_mvm_unshare_queue(mvm, queue);
19aefa45
LK
1133 else if (change_owner)
1134 iwl_mvm_change_queue_owner(mvm, queue);
9f9af3d7
LK
1135 }
1136
34e10860 1137alloc_queues:
24afba76
LK
1138 /* Go over all stations with deferred traffic */
1139 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1140 IWL_MVM_STATION_COUNT) {
1141 clear_bit(sta_id, mvm->sta_deferred_frames);
1142 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1143 lockdep_is_held(&mvm->mutex));
1144 if (IS_ERR_OR_NULL(sta))
1145 continue;
1146
1147 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1148 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1149
1150 for_each_set_bit(tid, &deferred_tid_traffic,
1151 IWL_MAX_TID_COUNT + 1)
1152 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1153 }
1154
1155 mutex_unlock(&mvm->mutex);
1156}
1157
1158static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
d5216a28
LK
1159 struct ieee80211_sta *sta,
1160 enum nl80211_iftype vif_type)
24afba76
LK
1161{
1162 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1163 int queue;
01796ff2 1164 bool using_inactive_queue = false, same_sta = false;
24afba76 1165
396952ee
SS
1166 /* queue reserving is disabled on new TX path */
1167 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1168 return 0;
1169
9794c64f
LK
1170 /*
1171 * Check for inactive queues, so we don't reach a situation where we
1172 * can't add a STA due to a shortage in queues that doesn't really exist
1173 */
1174 iwl_mvm_inactivity_check(mvm);
1175
24afba76
LK
1176 spin_lock_bh(&mvm->queue_info_lock);
1177
1178 /* Make sure we have free resources for this STA */
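 /* a non-TDLS station on a client interface gets the dedicated BSS
  * client queue, provided it is still free
  */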
d5216a28
LK
1179 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1180 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
cf961e16
LK
1181 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1182 IWL_MVM_QUEUE_FREE))
d5216a28
LK
1183 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1184 else
9794c64f
LK
1185 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1186 IWL_MVM_DQA_MIN_DATA_QUEUE,
d5216a28 1187 IWL_MVM_DQA_MAX_DATA_QUEUE);
24afba76
LK
1188 if (queue < 0) {
1189 spin_unlock_bh(&mvm->queue_info_lock);
1190 IWL_ERR(mvm, "No available queues for new station\n");
1191 return -ENOSPC;
01796ff2
SS
1192 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1193 /*
1194 * If this queue is already allocated but inactive we'll need to
1195 * first free this queue before enabling it again, we'll mark
1196 * it as reserved to make sure no new traffic arrives on it
1197 */
1198 using_inactive_queue = true;
1199 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
24afba76 1200 }
cf961e16 1201 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
24afba76
LK
1202
1203 spin_unlock_bh(&mvm->queue_info_lock);
1204
1205 mvmsta->reserved_queue = queue;
1206
01796ff2
SS
1207 if (using_inactive_queue)
1208 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1209
24afba76
LK
1210 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1211 queue, mvmsta->sta_id);
1212
1213 return 0;
1214}
1215
8d98ae6e
LK
1216/*
1217 * In DQA mode, after a HW restart the queues should be allocated as before, in
1218 * order to avoid race conditions when there are shared queues. This function
1219 * does the re-mapping and queue allocation.
1220 *
1221 * Note that re-enabling aggregations isn't done in this function.
1222 */
1223static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1224 struct iwl_mvm_sta *mvm_sta)
1225{
1226 unsigned int wdg_timeout =
1227 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1228 int i;
1229 struct iwl_trans_txq_scd_cfg cfg = {
1230 .sta_id = mvm_sta->sta_id,
1231 .frame_limit = IWL_FRAME_LIMIT,
1232 };
1233
03c902bf
JB
1234 /* Make sure reserved queue is still marked as such (if allocated) */
1235 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1236 mvm->queue_info[mvm_sta->reserved_queue].status =
1237 IWL_MVM_QUEUE_RESERVED;
8d98ae6e
LK
1238
1239 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1240 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1241 int txq_id = tid_data->txq_id;
1242 int ac;
1243 u8 mac_queue;
1244
6862fcee 1245 if (txq_id == IWL_MVM_INVALID_QUEUE)
8d98ae6e
LK
1246 continue;
1247
1248 skb_queue_head_init(&tid_data->deferred_tx_frames);
1249
1250 ac = tid_to_mac80211_ac[i];
1251 mac_queue = mvm_sta->vif->hw_queue[ac];
1252
310181ec
SS
1253 if (iwl_mvm_has_new_tx_api(mvm)) {
1254 IWL_DEBUG_TX_QUEUES(mvm,
1255 "Re-mapping sta %d tid %d\n",
1256 mvm_sta->sta_id, i);
1257 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1258 mvm_sta->sta_id,
1259 i, wdg_timeout);
1260 tid_data->txq_id = txq_id;
1261 } else {
1262 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
8d98ae6e 1263
310181ec 1264 cfg.tid = i;
cf6c6ea3 1265 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
310181ec
SS
1266 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1267 txq_id ==
1268 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
8d98ae6e 1269
310181ec
SS
1270 IWL_DEBUG_TX_QUEUES(mvm,
1271 "Re-mapping sta %d tid %d to queue %d\n",
1272 mvm_sta->sta_id, i, txq_id);
1273
1274 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1275 wdg_timeout);
34e10860 1276 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
310181ec 1277 }
8d98ae6e 1278 }
8d98ae6e
LK
1279}
1280
732d06e9
ST
1281static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1282 struct iwl_mvm_int_sta *sta,
1283 const u8 *addr,
1284 u16 mac_id, u16 color)
1285{
1286 struct iwl_mvm_add_sta_cmd cmd;
1287 int ret;
3f497de9 1288 u32 status = ADD_STA_SUCCESS;
732d06e9
ST
1289
1290 lockdep_assert_held(&mvm->mutex);
1291
1292 memset(&cmd, 0, sizeof(cmd));
1293 cmd.sta_id = sta->sta_id;
1294 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1295 color));
1296 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1297 cmd.station_type = sta->type;
1298
1299 if (!iwl_mvm_has_new_tx_api(mvm))
1300 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
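 /* a set bit disables aggregation on that TID (cf. tid_disable_agg
  * above); internal stations never aggregate
  */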
1301 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1302
1303 if (addr)
1304 memcpy(cmd.addr, addr, ETH_ALEN);
1305
1306 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1307 iwl_mvm_add_sta_cmd_size(mvm),
1308 &cmd, &status);
1309 if (ret)
1310 return ret;
1311
1312 switch (status & IWL_ADD_STA_STATUS_MASK) {
1313 case ADD_STA_SUCCESS:
1314 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1315 return 0;
1316 default:
1317 ret = -EIO;
1318 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1319 status);
1320 break;
1321 }
1322 return ret;
1323}
1324
8ca151b5
JB
1325int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1326 struct ieee80211_vif *vif,
1327 struct ieee80211_sta *sta)
1328{
1329 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9d8ce6af 1330 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
a571f5f6 1331 struct iwl_mvm_rxq_dup_data *dup_data;
8ca151b5 1332 int i, ret, sta_id;
732d06e9
ST
1333 bool sta_update = false;
1334 unsigned int sta_flags = 0;
8ca151b5
JB
1335
1336 lockdep_assert_held(&mvm->mutex);
1337
1338 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
b92e661b
EP
1339 sta_id = iwl_mvm_find_free_sta_id(mvm,
1340 ieee80211_vif_type_p2p(vif));
8ca151b5
JB
1341 else
1342 sta_id = mvm_sta->sta_id;
1343
0ae98812 1344 if (sta_id == IWL_MVM_INVALID_STA)
8ca151b5
JB
1345 return -ENOSPC;
1346
1347 spin_lock_init(&mvm_sta->lock);
1348
c8f54701
JB
1349 /* if this is a HW restart re-alloc existing queues */
1350 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
732d06e9
ST
1351 struct iwl_mvm_int_sta tmp_sta = {
1352 .sta_id = sta_id,
1353 .type = mvm_sta->sta_type,
1354 };
1355
1356 /*
1357 * First add an empty station since allocating
1358 * a queue requires a valid station
1359 */
1360 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1361 mvmvif->id, mvmvif->color);
1362 if (ret)
1363 goto err;
1364
8d98ae6e 1365 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
732d06e9
ST
1366 sta_update = true;
1367 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
8d98ae6e
LK
1368 goto update_fw;
1369 }
1370
8ca151b5
JB
1371 mvm_sta->sta_id = sta_id;
1372 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1373 mvmvif->color);
1374 mvm_sta->vif = vif;
a58bb468
LK
1375 if (!mvm->trans->cfg->gen2)
1376 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1377 else
1378 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
9ee718aa
EL
1379 mvm_sta->tx_protection = 0;
1380 mvm_sta->tt_tx_protection = false;
ced19f26 1381 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
8ca151b5
JB
1382
1383 /* HW restart, don't assume the memory has been zeroed */
69191afe 1384 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
8ca151b5 1385 mvm_sta->tfd_queue_msk = 0;
a0f6bf2a 1386
6d9d32b8 1387 /* for HW restart - reset everything but the sequence number */
24afba76 1388 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
6d9d32b8
JB
1389 u16 seq = mvm_sta->tid_data[i].seq_number;
1390 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1391 mvm_sta->tid_data[i].seq_number = seq;
24afba76 1392
24afba76
LK
1393 /*
1394 * Mark all queues for this STA as unallocated and defer TX
1395 * frames until the queue is allocated
1396 */
6862fcee 1397 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
24afba76 1398 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
6d9d32b8 1399 }
24afba76 1400 mvm_sta->deferred_traffic_tid_map = 0;
efed6640 1401 mvm_sta->agg_tids = 0;
8ca151b5 1402
a571f5f6
SS
1403 if (iwl_mvm_has_new_rx_api(mvm) &&
1404 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
92c4dca6
JB
1405 int q;
1406
a571f5f6 1407 dup_data = kcalloc(mvm->trans->num_rx_queues,
92c4dca6 1408 sizeof(*dup_data), GFP_KERNEL);
a571f5f6
SS
1409 if (!dup_data)
1410 return -ENOMEM;
92c4dca6
JB
1411 /*
1412 * Initialize all the last_seq values to 0xffff which can never
1413 * compare equal to the frame's seq_ctrl in the check in
1414 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1415 * number and fragmented packets don't reach that function.
1416 *
1417 * This thus allows receiving a packet with seqno 0 and the
1418 * retry bit set as the very first packet on a new TID.
1419 */
1420 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1421 memset(dup_data[q].last_seq, 0xff,
1422 sizeof(dup_data[q].last_seq));
a571f5f6
SS
1423 mvm_sta->dup_data = dup_data;
1424 }
1425
c8f54701 1426 if (!iwl_mvm_has_new_tx_api(mvm)) {
d5216a28
LK
1427 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1428 ieee80211_vif_type_p2p(vif));
24afba76
LK
1429 if (ret)
1430 goto err;
1431 }
1432
8d98ae6e 1433update_fw:
732d06e9 1434 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
8ca151b5 1435 if (ret)
a0f6bf2a 1436 goto err;
8ca151b5 1437
9e848010
JB
1438 if (vif->type == NL80211_IFTYPE_STATION) {
1439 if (!sta->tdls) {
0ae98812 1440 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
9e848010
JB
1441 mvmvif->ap_sta_id = sta_id;
1442 } else {
0ae98812 1443 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
9e848010
JB
1444 }
1445 }
8ca151b5
JB
1446
1447 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1448
1449 return 0;
a0f6bf2a
AN
1450
1451err:
a0f6bf2a 1452 return ret;
8ca151b5
JB
1453}
1454
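/*
 * Tell the firmware to start or stop draining frames for the given station
 * (via STA_FLG_DRAIN_FLOW).
 */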
1455int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1456 bool drain)
1457{
f9dc0004 1458 struct iwl_mvm_add_sta_cmd cmd = {};
8ca151b5
JB
1459 int ret;
1460 u32 status;
1461
1462 lockdep_assert_held(&mvm->mutex);
1463
1464 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1465 cmd.sta_id = mvmsta->sta_id;
1466 cmd.add_modify = STA_MODE_MODIFY;
1467 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1468 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1469
1470 status = ADD_STA_SUCCESS;
854c5705
SS
1471 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1472 iwl_mvm_add_sta_cmd_size(mvm),
f9dc0004 1473 &cmd, &status);
8ca151b5
JB
1474 if (ret)
1475 return ret;
1476
837c4da9 1477 switch (status & IWL_ADD_STA_STATUS_MASK) {
8ca151b5
JB
1478 case ADD_STA_SUCCESS:
1479 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1480 mvmsta->sta_id);
1481 break;
1482 default:
1483 ret = -EIO;
1484 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1485 mvmsta->sta_id);
1486 break;
1487 }
1488
1489 return ret;
1490}
1491
1492/*
1493 * Remove a station from the FW table. Before sending the command to remove
1494 * the station, validate that the station is indeed known to the driver (sanity
1495 * only).
1496 */
1497static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1498{
1499 struct ieee80211_sta *sta;
1500 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1501 .sta_id = sta_id,
1502 };
1503 int ret;
1504
1505 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1506 lockdep_is_held(&mvm->mutex));
1507
1508 /* Note: internal stations are marked as error values */
1509 if (!sta) {
1510 IWL_ERR(mvm, "Invalid station id\n");
1511 return -EINVAL;
1512 }
1513
a1022927 1514 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
8ca151b5
JB
1515 sizeof(rm_sta_cmd), &rm_sta_cmd);
1516 if (ret) {
1517 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1518 return ret;
1519 }
1520
1521 return 0;
1522}
1523
24afba76
LK
1524static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1525 struct ieee80211_vif *vif,
1526 struct iwl_mvm_sta *mvm_sta)
1527{
1528 int ac;
1529 int i;
1530
1531 lockdep_assert_held(&mvm->mutex);
1532
1533 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
6862fcee 1534 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
24afba76
LK
1535 continue;
1536
1537 ac = iwl_mvm_tid_to_ac_queue(i);
1538 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1539 vif->hw_queue[ac], i, 0);
6862fcee 1540 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
24afba76
LK
1541 }
1542}
1543
d6d517b7
SS
1544int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1545 struct iwl_mvm_sta *mvm_sta)
1546{
bec9522a 1547 int i;
d6d517b7
SS
1548
1549 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1550 u16 txq_id;
bec9522a 1551 int ret;
d6d517b7
SS
1552
1553 spin_lock_bh(&mvm_sta->lock);
1554 txq_id = mvm_sta->tid_data[i].txq_id;
1555 spin_unlock_bh(&mvm_sta->lock);
1556
1557 if (txq_id == IWL_MVM_INVALID_QUEUE)
1558 continue;
1559
1560 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1561 if (ret)
bec9522a 1562 return ret;
d6d517b7
SS
1563 }
1564
bec9522a 1565 return 0;
d6d517b7
SS
1566}
1567
8ca151b5
JB
1568int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1569 struct ieee80211_vif *vif,
1570 struct ieee80211_sta *sta)
1571{
1572 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9d8ce6af 1573 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
94c3e614 1574 u8 sta_id = mvm_sta->sta_id;
8ca151b5
JB
1575 int ret;
1576
1577 lockdep_assert_held(&mvm->mutex);
1578
a571f5f6
SS
1579 if (iwl_mvm_has_new_rx_api(mvm))
1580 kfree(mvm_sta->dup_data);
1581
c8f54701
JB
1582 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1583 if (ret)
1584 return ret;
d6d517b7 1585
c8f54701
JB
1586 /* flush its queues here since we are freeing mvm_sta */
1587 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1588 if (ret)
1589 return ret;
1590 if (iwl_mvm_has_new_tx_api(mvm)) {
1591 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1592 } else {
1593 u32 q_mask = mvm_sta->tfd_queue_msk;
56214749 1594
c8f54701
JB
1595 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1596 q_mask);
1597 }
1598 if (ret)
1599 return ret;
1600
1601 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
a0315dea 1602
c8f54701
JB
1603 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1604
1605 /* If there is a TXQ still marked as reserved - free it */
1606 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1607 u8 reserved_txq = mvm_sta->reserved_queue;
1608 enum iwl_mvm_queue_status *status;
a0315dea 1609
c8f54701
JB
1610 /*
1611 * If no traffic has gone through the reserved TXQ - it
1612 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1613 * should be manually marked as free again
1614 */
1615 spin_lock_bh(&mvm->queue_info_lock);
1616 status = &mvm->queue_info[reserved_txq].status;
1617 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1618 (*status != IWL_MVM_QUEUE_FREE),
1619 "sta_id %d reserved txq %d status %d",
1620 sta_id, reserved_txq, *status)) {
a0315dea 1621 spin_unlock_bh(&mvm->queue_info_lock);
c8f54701 1622 return -EINVAL;
a0315dea
LK
1623 }
1624
c8f54701
JB
1625 *status = IWL_MVM_QUEUE_FREE;
1626 spin_unlock_bh(&mvm->queue_info_lock);
1627 }
1628
1629 if (vif->type == NL80211_IFTYPE_STATION &&
1630 mvmvif->ap_sta_id == sta_id) {
1631 /* if associated - we can't remove the AP STA now */
1632 if (vif->bss_conf.assoc)
1633 return ret;
8ca151b5 1634
c8f54701
JB
1635 /* unassoc - go ahead - remove the AP STA now */
1636 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
37577fe2 1637
c8f54701
JB
1638 /* clear d0i3_ap_sta_id if no longer relevant */
1639 if (mvm->d0i3_ap_sta_id == sta_id)
1640 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
8ca151b5
JB
1641 }
1642
1d3c3f63
AN
1643 /*
1644 * This shouldn't happen - the TDLS channel switch should be canceled
1645 * before the STA is removed.
1646 */
94c3e614 1647 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
0ae98812 1648 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1d3c3f63
AN
1649 cancel_delayed_work(&mvm->tdls_cs.dwork);
1650 }
1651
e3d4bc8c
EG
1652 /*
1653 * Make sure that the tx response code sees the station as -EBUSY and
1654 * calls the drain worker.
1655 */
1656 spin_lock_bh(&mvm_sta->lock);
c8f54701 1657 spin_unlock_bh(&mvm_sta->lock);
94c3e614 1658
c8f54701
JB
1659 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1660 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
8ca151b5
JB
1661
1662 return ret;
1663}
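/*
 * Illustrative summary (derived from iwl_mvm_rm_sta() above, not new
 * behavior): the teardown ordering is
 *
 *	iwl_mvm_drain_sta(mvm, mvm_sta, true);     - stop new TX to the STA
 *	iwl_mvm_flush_sta(mvm, mvm_sta, ...);      - flush pending frames
 *	...wait for the STA's TX queues to empty...
 *	iwl_mvm_drain_sta(mvm, mvm_sta, false);    - undo the drain marking
 *	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 *	iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
 *
 * The empty lock/unlock pair on mvm_sta->lock just before
 * iwl_mvm_rm_sta_common() acts as a barrier: any TX response handler
 * that saw the station as draining has finished before the station
 * entry is finally torn down.
 */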
1664
1665int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1666 struct ieee80211_vif *vif,
1667 u8 sta_id)
1668{
1669 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1670
1671 lockdep_assert_held(&mvm->mutex);
1672
1673 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1674 return ret;
1675}
1676
1677int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1678 struct iwl_mvm_int_sta *sta,
1679 u32 qmask, enum nl80211_iftype iftype,
1680 enum iwl_sta_type type)
1681{
1682 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1683 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1684 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1685 return -ENOSPC;
1686 }
1687
1688 sta->tfd_queue_msk = qmask;
1689 sta->type = type;
1690
1691 /* put a non-NULL value so iterating over the stations won't stop */
1692 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1693 return 0;
1694}
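/*
 * Illustrative sketch (assumed caller, not part of this file): code
 * walking fw_id_to_mac_id[] can tell apart the three slot states the
 * ERR_PTR(-EINVAL) marker above creates:
 *
 *	sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
 *	if (!sta)
 *		continue;	- slot unused
 *	if (IS_ERR(sta))
 *		continue;	- internal station, no mac80211 counterpart
 *	else: a real mac80211 station, safe to dereference
 */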
1695
1696 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1697{
1698 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1699 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1700 sta->sta_id = IWL_MVM_INVALID_STA;
1701}
1702
1703 static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
1704 {
1705 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1706 mvm->cfg->base_params->wd_timeout :
1707 IWL_WATCHDOG_DISABLED;
1708
1709 if (iwl_mvm_has_new_tx_api(mvm)) {
1710 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
1711 mvm->aux_sta.sta_id,
1712 IWL_MAX_TID_COUNT,
1713 wdg_timeout);
1714 mvm->aux_queue = queue;
1715 } else {
1716 struct iwl_trans_txq_scd_cfg cfg = {
1717 .fifo = IWL_MVM_TX_FIFO_MCAST,
1718 .sta_id = mvm->aux_sta.sta_id,
1719 .tid = IWL_MAX_TID_COUNT,
1720 .aggregate = false,
1721 .frame_limit = IWL_FRAME_LIMIT,
1722 };
1723
1724 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1725 wdg_timeout);
1726 }
1727}
1728
1729int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1730{
1731 int ret;
1732
1733 lockdep_assert_held(&mvm->mutex);
1734
1735 /* Allocate aux station and assign to it the aux queue */
1736 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1737 NL80211_IFTYPE_UNSPECIFIED,
1738 IWL_STA_AUX_ACTIVITY);
1739 if (ret)
1740 return ret;
1741
1742 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1743 if (!iwl_mvm_has_new_tx_api(mvm))
1744 iwl_mvm_enable_aux_queue(mvm);
1745
1746 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1747 MAC_INDEX_AUX, 0);
1748 if (ret) {
1749 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1750 return ret;
1751 }
1752
1753 /*
1754 * For a000 firmware and onward we cannot add a queue to a station
1755 * unknown to the firmware, so enable the queue here - after the station was added
1756 */
1757 if (iwl_mvm_has_new_tx_api(mvm))
1758 iwl_mvm_enable_aux_queue(mvm);
1759
1760 return 0;
1761}
1762
1763int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1764{
1765 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1766
1767 lockdep_assert_held(&mvm->mutex);
1768 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1769 mvmvif->id, 0);
1770}
1771
1772int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1773{
1774 int ret;
1775
1776 lockdep_assert_held(&mvm->mutex);
1777
1778 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1779 if (ret)
1780 IWL_WARN(mvm, "Failed sending remove station\n");
1781
1782 return ret;
1783}
1784
1785void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1786{
1787 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1788}
1789
1790void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1791{
1792 lockdep_assert_held(&mvm->mutex);
1793
1794 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1795}
1796
1797/*
1798 * Send the add station command for the vif's broadcast station.
1799 * Assumes that the station was already allocated.
1800 *
1801 * @mvm: the mvm component
1802 * @vif: the interface to which the broadcast station is added
1803 * The broadcast station itself is taken from the vif (mvmvif->bcast_sta).
1804 */
1805 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1806{
1807 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1808 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1809 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1810 const u8 *baddr = _baddr;
1811 int queue;
1812 int ret;
1813 unsigned int wdg_timeout =
1814 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1815 struct iwl_trans_txq_scd_cfg cfg = {
1816 .fifo = IWL_MVM_TX_FIFO_VO,
1817 .sta_id = mvmvif->bcast_sta.sta_id,
1818 .tid = IWL_MAX_TID_COUNT,
1819 .aggregate = false,
1820 .frame_limit = IWL_FRAME_LIMIT,
1821 };
1822
1823 lockdep_assert_held(&mvm->mutex);
1824
1825 if (!iwl_mvm_has_new_tx_api(mvm)) {
1826 if (vif->type == NL80211_IFTYPE_AP ||
1827 vif->type == NL80211_IFTYPE_ADHOC)
1828 queue = mvm->probe_queue;
1829 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1830 queue = mvm->p2p_dev_queue;
1831 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
1832 return -EINVAL;
1833
1834 bsta->tfd_queue_msk |= BIT(queue);
1835
1836 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1837 &cfg, wdg_timeout);
1838 }
1839
1840 if (vif->type == NL80211_IFTYPE_ADHOC)
1841 baddr = vif->bss_conf.bssid;
1842
1843 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
1844 return -ENOSPC;
1845
1846 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1847 mvmvif->id, mvmvif->color);
1848 if (ret)
1849 return ret;
1850
1851 /*
1852 * For a000 firmware and onward we cannot add a queue to a station
1853 * unknown to the firmware, so enable the queue here - after the station was added
1854 */
1855 if (iwl_mvm_has_new_tx_api(mvm)) {
1856 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1857 bsta->sta_id,
1858 IWL_MAX_TID_COUNT,
1859 wdg_timeout);
1860
1861 if (vif->type == NL80211_IFTYPE_AP ||
1862 vif->type == NL80211_IFTYPE_ADHOC)
1863 mvm->probe_queue = queue;
1864 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1865 mvm->p2p_dev_queue = queue;
1866 }
1867
1868 return 0;
1869}
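/*
 * Queue selection used above, summarized (derived from the code):
 *
 *	vif->type			queue
 *	NL80211_IFTYPE_AP/ADHOC		mvm->probe_queue
 *	NL80211_IFTYPE_P2P_DEVICE	mvm->p2p_dev_queue
 *	anything else			-EINVAL (with a WARN)
 *
 * Note the ordering difference: pre-a000 hardware enables the queue
 * before ADD_STA, while on the new TX API the TVQM queue can only be
 * allocated once the firmware already knows the station.
 */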
1870
1871static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1872 struct ieee80211_vif *vif)
1873{
1874 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1875 int queue;
1876
1877 lockdep_assert_held(&mvm->mutex);
1878
1879 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
1880
1881 switch (vif->type) {
1882 case NL80211_IFTYPE_AP:
1883 case NL80211_IFTYPE_ADHOC:
1884 queue = mvm->probe_queue;
1885 break;
1886 case NL80211_IFTYPE_P2P_DEVICE:
1887 queue = mvm->p2p_dev_queue;
1888 break;
1889 default:
1890 WARN(1, "Can't free bcast queue on vif type %d\n",
1891 vif->type);
1892 return;
1893 }
1894
1895 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
1896 if (iwl_mvm_has_new_tx_api(mvm))
1897 return;
1898
1899 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
1900 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
1901}
1902
1903/* Send the FW a request to remove the station from its internal data
1904 * structures, but DO NOT remove the entry from the local data structures. */
1905 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1906{
1907 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1908 int ret;
1909
1910 lockdep_assert_held(&mvm->mutex);
1911
1912 iwl_mvm_free_bcast_sta_queues(mvm, vif);
1913
1914 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
1915 if (ret)
1916 IWL_WARN(mvm, "Failed sending remove station\n");
1917 return ret;
1918}
1919
1920int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1921{
1922 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1923
1924 lockdep_assert_held(&mvm->mutex);
1925
1926 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
1927 ieee80211_vif_type_p2p(vif),
1928 IWL_STA_GENERAL_PURPOSE);
1929}
1930
1931/* Allocate a new station entry for the broadcast station to the given vif,
1932 * and send it to the FW.
1933 * Note that each P2P mac should have its own broadcast station.
1934 *
1935 * @mvm: the mvm component
1936 * @vif: the interface to which the broadcast station is added
1937 * The broadcast station itself is taken from the vif (mvmvif->bcast_sta). */
1938 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1939{
1940 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1941 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1942 int ret;
1943
1944 lockdep_assert_held(&mvm->mutex);
1945
1946 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1947 if (ret)
1948 return ret;
1949
1950 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
1951
1952 if (ret)
1953 iwl_mvm_dealloc_int_sta(mvm, bsta);
1954
1955 return ret;
1956}
1957
1958void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1959{
1960 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1961
1962 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
1963}
1964
1965/*
1966 * Send the FW a request to remove the station from its internal data
1967 * structures, and in addition remove it from the local data structure.
1968 */
1969 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1970{
1971 int ret;
1972
1973 lockdep_assert_held(&mvm->mutex);
1974
1975 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
1976
1977 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1978
1979 return ret;
1980}
1981
1982/*
1983 * Allocate a new station entry for the multicast station to the given vif,
1984 * and send it to the FW.
1985 * Note that each AP/GO mac should have its own multicast station.
1986 *
1987 * @mvm: the mvm component
1988 * @vif: the interface to which the multicast station is added
1989 */
1990int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1991{
1992 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1993 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
1994 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
1995 const u8 *maddr = _maddr;
1996 struct iwl_trans_txq_scd_cfg cfg = {
1997 .fifo = IWL_MVM_TX_FIFO_MCAST,
1998 .sta_id = msta->sta_id,
1999 .tid = IWL_MAX_TID_COUNT,
2000 .aggregate = false,
2001 .frame_limit = IWL_FRAME_LIMIT,
2002 };
2003 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2004 int ret;
2005
2006 lockdep_assert_held(&mvm->mutex);
2007
2008 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2009 vif->type != NL80211_IFTYPE_ADHOC))
2010 return -ENOTSUPP;
2011
2012 /*
2013 * While in previous FWs we had to exclude the cab queue from the TFD
2014 * queue mask, now it is needed like any other queue.
2015 */
2016 if (!iwl_mvm_has_new_tx_api(mvm) &&
2017 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2018 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2019 &cfg, timeout);
2020 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2021 }
2022 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2023 mvmvif->id, mvmvif->color);
2024 if (ret) {
2025 iwl_mvm_dealloc_int_sta(mvm, msta);
2026 return ret;
2027 }
2028
2029 /*
2030 * Enable cab queue after the ADD_STA command is sent.
2031 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
2032 * command with an unknown station id, and for FW that doesn't support
2033 * the station API, since the cab queue is not included in the
2034 * tfd_queue_mask.
2035 */
2036 if (iwl_mvm_has_new_tx_api(mvm)) {
2037 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2038 msta->sta_id,
2039 IWL_MAX_TID_COUNT,
2040 timeout);
2041 mvmvif->cab_queue = queue;
2042 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2043 IWL_UCODE_TLV_API_STA_TYPE)) {
2044 /*
2045 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2046 * invalid, so make sure we use the queue we want.
2047 * Note that this is done here as we want to avoid making DQA
2048 * changes in mac80211 layer.
2049 */
2050 if (vif->type == NL80211_IFTYPE_ADHOC) {
2051 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2052 mvmvif->cab_queue = vif->cab_queue;
2053 }
2054 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2055 &cfg, timeout);
2056 }
2057
2058 return 0;
2059}
2060
2061/*
2062 * Send the FW a request to remove the station from its internal data
2063 * structures, and in addition remove it from the local data structure.
2064 */
2065int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2066{
2067 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2068 int ret;
2069
2070 lockdep_assert_held(&mvm->mutex);
2071
2072 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2073
2074 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2075 IWL_MAX_TID_COUNT, 0);
2076
2077 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2078 if (ret)
2079 IWL_WARN(mvm, "Failed sending remove station\n");
2080
2081 return ret;
2082}
2083
2084#define IWL_MAX_RX_BA_SESSIONS 16
2085
2086 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2087{
2088 struct iwl_mvm_delba_notif notif = {
2089 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2090 .metadata.sync = 1,
2091 .delba.baid = baid,
2092 };
2093 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2094};
2095
2096static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2097 struct iwl_mvm_baid_data *data)
2098{
2099 int i;
2100
2101 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2102
2103 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2104 int j;
2105 struct iwl_mvm_reorder_buffer *reorder_buf =
2106 &data->reorder_buf[i];
2107
2108 spin_lock_bh(&reorder_buf->lock);
2109 if (likely(!reorder_buf->num_stored)) {
2110 spin_unlock_bh(&reorder_buf->lock);
2111 continue;
2112 }
2113
2114 /*
2115 * This shouldn't happen in regular DELBA since the internal
2116 * delBA notification should trigger a release of all frames in
2117 * the reorder buffer.
2118 */
2119 WARN_ON(1);
2120
2121 for (j = 0; j < reorder_buf->buf_size; j++)
2122 __skb_queue_purge(&reorder_buf->entries[j]);
2123 /*
2124 * Prevent timer re-arm. This prevents a very far-fetched case
2125 * where we timed out on the notification. There may be prior
2126 * RX frames pending in the RX queue before the notification
2127 * that might get processed between now and the actual deletion
2128 * and we would re-arm the timer although we are deleting the
2129 * reorder buffer.
2130 */
2131 reorder_buf->removed = true;
2132 spin_unlock_bh(&reorder_buf->lock);
2133 del_timer_sync(&reorder_buf->reorder_timer);
2134 }
2135}
2136
2137static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2138 u32 sta_id,
2139 struct iwl_mvm_baid_data *data,
2140 u16 ssn, u8 buf_size)
2141{
2142 int i;
2143
2144 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2145 struct iwl_mvm_reorder_buffer *reorder_buf =
2146 &data->reorder_buf[i];
2147 int j;
2148
2149 reorder_buf->num_stored = 0;
2150 reorder_buf->head_sn = ssn;
2151 reorder_buf->buf_size = buf_size;
2152 /* rx reorder timer */
2153 reorder_buf->reorder_timer.function =
2154 iwl_mvm_reorder_timer_expired;
2155 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
2156 init_timer(&reorder_buf->reorder_timer);
2157 spin_lock_init(&reorder_buf->lock);
2158 reorder_buf->mvm = mvm;
2159 reorder_buf->queue = i;
2160 reorder_buf->sta_id = sta_id;
2161 reorder_buf->valid = false;
2162 for (j = 0; j < reorder_buf->buf_size; j++)
2163 __skb_queue_head_init(&reorder_buf->entries[j]);
2164 }
2165}
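/*
 * Illustrative numbers (assumed configuration, not from this file):
 * one reorder buffer is allocated per RX queue (see the kzalloc() in
 * iwl_mvm_sta_rx_agg() below), each holding buf_size skb queues - one
 * per reorder slot. With num_rx_queues == 2 and a 64-frame BA window
 * this initializes 2 * 64 = 128 empty skb queues, all starting from
 * the negotiated ssn.
 */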
2166
2167 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2168 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
2169{
2170 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2171 struct iwl_mvm_add_sta_cmd cmd = {};
2172 struct iwl_mvm_baid_data *baid_data = NULL;
2173 int ret;
2174 u32 status;
2175
2176 lockdep_assert_held(&mvm->mutex);
2177
2178 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2179 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2180 return -ENOSPC;
2181 }
2182
2183 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2184 /*
2185 * Allocate here so if allocation fails we can bail out early
2186 * before starting the BA session in the firmware
2187 */
2188 baid_data = kzalloc(sizeof(*baid_data) +
2189 mvm->trans->num_rx_queues *
2190 sizeof(baid_data->reorder_buf[0]),
2191 GFP_KERNEL);
2192 if (!baid_data)
2193 return -ENOMEM;
2194 }
2195
2196 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2197 cmd.sta_id = mvm_sta->sta_id;
2198 cmd.add_modify = STA_MODE_MODIFY;
2199 if (start) {
2200 cmd.add_immediate_ba_tid = (u8) tid;
2201 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2202 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
2203 } else {
2204 cmd.remove_immediate_ba_tid = (u8) tid;
2205 }
2206 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2207 STA_MODIFY_REMOVE_BA_TID;
2208
2209 status = ADD_STA_SUCCESS;
2210 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2211 iwl_mvm_add_sta_cmd_size(mvm),
2212 &cmd, &status);
2213 if (ret)
2214 goto out_free;
2215
2216 switch (status & IWL_ADD_STA_STATUS_MASK) {
2217 case ADD_STA_SUCCESS:
2218 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2219 start ? "start" : "stopp");
2220 break;
2221 case ADD_STA_IMMEDIATE_BA_FAILURE:
2222 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2223 ret = -ENOSPC;
2224 break;
2225 default:
2226 ret = -EIO;
2227 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2228 start ? "start" : "stopp", status);
2229 break;
2230 }
2231
2232 if (ret)
2233 goto out_free;
2234
2235 if (start) {
2236 u8 baid;
2237
2238 mvm->rx_ba_sessions++;
2239
2240 if (!iwl_mvm_has_new_rx_api(mvm))
2241 return 0;
2242
2243 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2244 ret = -EINVAL;
2245 goto out_free;
2246 }
2247 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2248 IWL_ADD_STA_BAID_SHIFT);
2249 baid_data->baid = baid;
2250 baid_data->timeout = timeout;
2251 baid_data->last_rx = jiffies;
2252 setup_timer(&baid_data->session_timer,
2253 iwl_mvm_rx_agg_session_expired,
2254 (unsigned long)&mvm->baid_map[baid]);
2255 baid_data->mvm = mvm;
2256 baid_data->tid = tid;
2257 baid_data->sta_id = mvm_sta->sta_id;
2258
2259 mvm_sta->tid_to_baid[tid] = baid;
2260 if (timeout)
2261 mod_timer(&baid_data->session_timer,
2262 TU_TO_EXP_TIME(timeout * 2));
2263
2264 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2265 baid_data, ssn, buf_size);
2266 /*
2267 * protect the BA data with RCU to cover a case where our
2268 * internal RX sync mechanism will timeout (not that it's
2269 * supposed to happen) and we will free the session data while
2270 * RX is being processed in parallel
2271 */
2272 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2273 mvm_sta->sta_id, tid, baid);
2274 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2275 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2276 } else {
2277 u8 baid = mvm_sta->tid_to_baid[tid];
2278
2279 if (mvm->rx_ba_sessions > 0)
2280 /* check that restart flow didn't zero the counter */
2281 mvm->rx_ba_sessions--;
2282 if (!iwl_mvm_has_new_rx_api(mvm))
2283 return 0;
2284
2285 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2286 return -EINVAL;
2287
2288 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2289 if (WARN_ON(!baid_data))
2290 return -EINVAL;
2291
2292 /* synchronize all rx queues so we can safely delete */
2293 iwl_mvm_free_reorder(mvm, baid_data);
2294 del_timer_sync(&baid_data->session_timer);
2295 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2296 kfree_rcu(baid_data, rcu_head);
2297 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2298 }
2299 return 0;
2300
2301out_free:
2302 kfree(baid_data);
2303 return ret;
2304}
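/*
 * Worked example (illustrative): the BA timeout is expressed in TUs
 * (1 TU = 1024 usec), and the session timer above is armed to twice
 * that value, so timeout == 100 re-arms the timer roughly
 * 2 * 100 * 1024 usec ~= 205 msec into the future. The BAID itself
 * comes back in the ADD_STA response status:
 *
 *	baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
 *		    IWL_ADD_STA_BAID_SHIFT);
 */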
2305
2306int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2307 int tid, u8 queue, bool start)
2308{
2309 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2310 struct iwl_mvm_add_sta_cmd cmd = {};
2311 int ret;
2312 u32 status;
2313
2314 lockdep_assert_held(&mvm->mutex);
2315
2316 if (start) {
2317 mvm_sta->tfd_queue_msk |= BIT(queue);
2318 mvm_sta->tid_disable_agg &= ~BIT(tid);
2319 } else {
2320 /* In DQA-mode the queue isn't removed on agg termination */
2321 mvm_sta->tid_disable_agg |= BIT(tid);
2322 }
2323
2324 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2325 cmd.sta_id = mvm_sta->sta_id;
2326 cmd.add_modify = STA_MODE_MODIFY;
2327 if (!iwl_mvm_has_new_tx_api(mvm))
2328 cmd.modify_mask = STA_MODIFY_QUEUES;
2329 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2330 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2331 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2332
2333 status = ADD_STA_SUCCESS;
2334 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2335 iwl_mvm_add_sta_cmd_size(mvm),
2336 &cmd, &status);
2337 if (ret)
2338 return ret;
2339
2340 switch (status & IWL_ADD_STA_STATUS_MASK) {
2341 case ADD_STA_SUCCESS:
2342 break;
2343 default:
2344 ret = -EIO;
2345 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2346 start ? "start" : "stopp", status);
2347 break;
2348 }
2349
2350 return ret;
2351}
2352
2353 const u8 tid_to_mac80211_ac[] = {
2354 IEEE80211_AC_BE,
2355 IEEE80211_AC_BK,
2356 IEEE80211_AC_BK,
2357 IEEE80211_AC_BE,
2358 IEEE80211_AC_VI,
2359 IEEE80211_AC_VI,
2360 IEEE80211_AC_VO,
2361 IEEE80211_AC_VO,
2362 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2363};
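/*
 * Example lookups (illustrative): this follows the standard 802.11
 * UP-to-AC mapping - TIDs 0 and 3 are best effort, 1-2 background,
 * 4-5 video and 6-7 voice, so e.g. tid_to_mac80211_ac[5] ==
 * IEEE80211_AC_VI. The ninth entry exists only for the internal
 * management pseudo-TID noted above.
 */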
2364
2365static const u8 tid_to_ucode_ac[] = {
2366 AC_BE,
2367 AC_BK,
2368 AC_BK,
2369 AC_BE,
2370 AC_VI,
2371 AC_VI,
2372 AC_VO,
2373 AC_VO,
2374};
2375
2376int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2377 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2378{
2379 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2380 struct iwl_mvm_tid_data *tid_data;
2381 u16 normalized_ssn;
2382 int txq_id;
2383 int ret;
2384
2385 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2386 return -EINVAL;
2387
2388 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2389 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2390 IWL_ERR(mvm,
2391 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2392 mvmsta->tid_data[tid].state);
2393 return -ENXIO;
2394 }
2395
2396 lockdep_assert_held(&mvm->mutex);
2397
2398 spin_lock_bh(&mvmsta->lock);
2399
2400 /* possible race condition - we entered D0i3 while starting agg */
2401 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2402 spin_unlock_bh(&mvmsta->lock);
2403 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2404 return -EIO;
2405 }
2406
2407 spin_lock(&mvm->queue_info_lock);
2408
2409 /*
2410 * Note the possible cases:
2411 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2412 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2413 * one and mark it as reserved
2414 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2415 * non-DQA mode, since the TXQ hasn't yet been allocated
2416 * Don't support case 3 for new TX path as it is not expected to happen
2417 * and aggregation will be offloaded soon anyway
2418 */
2419 txq_id = mvmsta->tid_data[tid].txq_id;
2420 if (iwl_mvm_has_new_tx_api(mvm)) {
2421 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2422 ret = -ENXIO;
2423 goto release_locks;
2424 }
2425 } else if (unlikely(mvm->queue_info[txq_id].status ==
2426 IWL_MVM_QUEUE_SHARED)) {
2427 ret = -ENXIO;
2428 IWL_DEBUG_TX_QUEUES(mvm,
2429 "Can't start tid %d agg on shared queue!\n",
2430 tid);
2431 goto release_locks;
2432 } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
2433 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2434 IWL_MVM_DQA_MIN_DATA_QUEUE,
2435 IWL_MVM_DQA_MAX_DATA_QUEUE);
2436 if (txq_id < 0) {
2437 ret = txq_id;
2438 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2439 goto release_locks;
2440 }
2441 /*
2442 * TXQ shouldn't be in inactive mode for non-DQA, so getting
2443 * an inactive queue from iwl_mvm_find_free_queue() is
2444 * certainly a bug
2445 */
2446 WARN_ON(mvm->queue_info[txq_id].status ==
2447 IWL_MVM_QUEUE_INACTIVE);
2448
2449 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2450 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2451 }
2452
2453 spin_unlock(&mvm->queue_info_lock);
2454
2455 IWL_DEBUG_TX_QUEUES(mvm,
2456 "AGG for tid %d will be on queue #%d\n",
2457 tid, txq_id);
2458
2459 tid_data = &mvmsta->tid_data[tid];
2460 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2461 tid_data->txq_id = txq_id;
2462 *ssn = tid_data->ssn;
2463
2464 IWL_DEBUG_TX_QUEUES(mvm,
2465 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2466 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2467 tid_data->next_reclaimed);
2468
2469 /*
2470 * In A000 HW, the next_reclaimed index is only 8 bits, so we'll need
2471 * to align the wrap around of ssn so we compare relevant values.
2472 */
2473 normalized_ssn = tid_data->ssn;
2474 if (mvm->trans->cfg->gen2)
2475 normalized_ssn &= 0xff;
2476
2477 if (normalized_ssn == tid_data->next_reclaimed) {
2478 tid_data->state = IWL_AGG_STARTING;
2479 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2480 } else {
2481 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2482 }
2483
2484 ret = 0;
2485 goto out;
2486
2487release_locks:
2488 spin_unlock(&mvm->queue_info_lock);
2489out:
2490 spin_unlock_bh(&mvmsta->lock);
2491
2492 return ret;
2493}
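/*
 * Worked example (illustrative) of the ssn normalization above on
 * gen2 (a000) hardware, where next_reclaimed is only 8 bits wide:
 * with tid_data->ssn == 0x112 and tid_data->next_reclaimed == 0x12,
 *
 *	normalized_ssn = 0x112 & 0xff;	- yields 0x12
 *
 * so the queue is correctly seen as drained and the session goes
 * straight to IWL_AGG_STARTING instead of waiting in
 * IWL_EMPTYING_HW_QUEUE_ADDBA.
 */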
2494
2495int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2496 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2497 bool amsdu)
2498{
2499 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2500 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2501 unsigned int wdg_timeout =
2502 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2503 int queue, ret;
2504 bool alloc_queue = true;
2505 enum iwl_mvm_queue_status queue_status;
2506 u16 ssn;
2507
2508 struct iwl_trans_txq_scd_cfg cfg = {
2509 .sta_id = mvmsta->sta_id,
2510 .tid = tid,
2511 .frame_limit = buf_size,
2512 .aggregate = true,
2513 };
2514
2515 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2516 != IWL_MAX_TID_COUNT);
2517
2518 if (!mvm->trans->cfg->gen2)
2519 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2520 else
2521 buf_size = min_t(int, buf_size,
2522 LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);
2523
2524 spin_lock_bh(&mvmsta->lock);
2525 ssn = tid_data->ssn;
2526 queue = tid_data->txq_id;
2527 tid_data->state = IWL_AGG_ON;
2528 mvmsta->agg_tids |= BIT(tid);
2529 tid_data->ssn = 0xffff;
2530 tid_data->amsdu_in_ampdu_allowed = amsdu;
2531 spin_unlock_bh(&mvmsta->lock);
2532
2533 if (iwl_mvm_has_new_tx_api(mvm)) {
2534 /*
2535 * If no queue iwl_mvm_sta_tx_agg_start() would have failed so
2536 * no need to check queue's status
2537 */
2538 if (buf_size < mvmsta->max_agg_bufsize)
2539 return -ENOTSUPP;
2540
2541 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2542 if (ret)
2543 return -EIO;
2544 goto out;
2545 }
2546
2547 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2548
2549 spin_lock_bh(&mvm->queue_info_lock);
2550 queue_status = mvm->queue_info[queue].status;
2551 spin_unlock_bh(&mvm->queue_info_lock);
2552
2553 /* Maybe there is no need to even alloc a queue... */
2554 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2555 alloc_queue = false;
2556
2557 /*
2558 * Only reconfig the SCD for the queue if the window size has
2559 * changed from current (become smaller)
2560 */
2561 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2562 /*
2563 * If reconfiguring an existing queue, it first must be
2564 * drained
2565 */
2566 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2567 BIT(queue));
2568 if (ret) {
2569 IWL_ERR(mvm,
2570 "Error draining queue before reconfig\n");
2571 return ret;
2572 }
2573
2574 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2575 mvmsta->sta_id, tid,
2576 buf_size, ssn);
2577 if (ret) {
2578 IWL_ERR(mvm,
2579 "Error reconfiguring TXQ #%d\n", queue);
2580 return ret;
2581 }
2582 }
2583
2584 if (alloc_queue)
2585 iwl_mvm_enable_txq(mvm, queue,
2586 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2587 &cfg, wdg_timeout);
2588
2589 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2590 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2591 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2592 if (ret)
2593 return -EIO;
2594 }
2595
2596 /* No need to mark as reserved */
2597 spin_lock_bh(&mvm->queue_info_lock);
2598 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2599 spin_unlock_bh(&mvm->queue_info_lock);
2600
2601out:
2602 /*
2603 * Even though in theory the peer could have different
2604 * aggregation reorder buffer sizes for different sessions,
2605 * our ucode doesn't allow for that and has a global limit
2606 * for each station. Therefore, use the minimum of all the
2607 * aggregation sessions and our default value.
2608 */
2609 mvmsta->max_agg_bufsize =
2610 min(mvmsta->max_agg_bufsize, buf_size);
2611 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2612
2613 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2614 sta->addr, tid);
2615
2616 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
2617}
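/*
 * Illustrative sketch of the assumed mac80211 caller (it lives in
 * mac80211.c, not in this file): the two-step setup above is driven
 * through the ampdu_action callback, roughly
 *
 *	case IEEE80211_AMPDU_TX_START:
 *		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
 *		break;
 *	case IEEE80211_AMPDU_TX_OPERATIONAL:
 *		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
 *					      buf_size, amsdu);
 *		break;
 *
 * i.e. _start reserves/locates the queue and _oper configures the
 * FIFO, frame limit and rate-scaling aggregation limit.
 */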
2618
2619static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2620 struct iwl_mvm_sta *mvmsta,
2621 u16 txq_id)
2622{
2623 if (iwl_mvm_has_new_tx_api(mvm))
2624 return;
2625
2626 spin_lock_bh(&mvm->queue_info_lock);
2627 /*
2628 * The TXQ is marked as reserved only if no traffic came through yet.
2629 * This means no traffic has been sent on this TID (agg'd or not), so
2630 * we no longer have use for the queue. Since it hasn't even been
2631 * allocated through iwl_mvm_enable_txq, we can just mark it back as
2632 * free.
2633 */
2634 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2635 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2636
2637 spin_unlock_bh(&mvm->queue_info_lock);
2638}
2639
8ca151b5
JB
2640int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2641 struct ieee80211_sta *sta, u16 tid)
2642{
2643 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2644 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2645 u16 txq_id;
2646 int err;
2647
2648 /*
2649 * If mac80211 is cleaning its state, then say that we finished since
2650 * our state has been cleared anyway.
2651 */
2652 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2653 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2654 return 0;
2655 }
2656
2657 spin_lock_bh(&mvmsta->lock);
2658
2659 txq_id = tid_data->txq_id;
2660
2661 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2662 mvmsta->sta_id, tid, txq_id, tid_data->state);
2663
2664 mvmsta->agg_tids &= ~BIT(tid);
2665
2666 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
2667
2668 switch (tid_data->state) {
2669 case IWL_AGG_ON:
2670 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2671
2672 IWL_DEBUG_TX_QUEUES(mvm,
2673 "ssn = %d, next_recl = %d\n",
2674 tid_data->ssn, tid_data->next_reclaimed);
2675
2676 tid_data->ssn = 0xffff;
2677 tid_data->state = IWL_AGG_OFF;
2678 spin_unlock_bh(&mvmsta->lock);
2679
2680 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2681
2682 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2683 return 0;
2684 case IWL_AGG_STARTING:
2685 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2686 /*
2687 * The agg session has been stopped before it was set up. This
2688 * can happen when the AddBA timer times out for example.
2689 */
2690
2691 /* No barriers since we are under mutex */
2692 lockdep_assert_held(&mvm->mutex);
2693
2694 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2695 tid_data->state = IWL_AGG_OFF;
2696 err = 0;
2697 break;
2698 default:
2699 IWL_ERR(mvm,
2700 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2701 mvmsta->sta_id, tid, tid_data->state);
2702 IWL_ERR(mvm,
2703 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2704 err = -EINVAL;
2705 }
2706
2707 spin_unlock_bh(&mvmsta->lock);
2708
2709 return err;
2710}
2711
2712int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2713 struct ieee80211_sta *sta, u16 tid)
2714{
2715 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2716 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2717 u16 txq_id;
2718 enum iwl_mvm_agg_state old_state;
2719
2720 /*
2721 * First set the agg state to OFF to avoid calling
2722 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2723 */
2724 spin_lock_bh(&mvmsta->lock);
2725 txq_id = tid_data->txq_id;
2726 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2727 mvmsta->sta_id, tid, txq_id, tid_data->state);
2728 old_state = tid_data->state;
2729 tid_data->state = IWL_AGG_OFF;
2730 mvmsta->agg_tids &= ~BIT(tid);
2731 spin_unlock_bh(&mvmsta->lock);
2732
2733 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
2734
2735 if (old_state >= IWL_AGG_ON) {
2736 iwl_mvm_drain_sta(mvm, mvmsta, true);
2737
2738 if (iwl_mvm_has_new_tx_api(mvm)) {
2739 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
2740 BIT(tid), 0))
2741 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2742 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
2743 } else {
2744 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2745 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2746 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
2747 }
2748
2749 iwl_mvm_drain_sta(mvm, mvmsta, false);
2750
2751 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2752 }
2753
2754 return 0;
2755}
2756
2757static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2758{
2759 int i, max = -1, max_offs = -1;
2760
2761 lockdep_assert_held(&mvm->mutex);
2762
2763 /* Pick the unused key offset with the highest 'deleted'
2764 * counter. Every time a key is deleted, all the counters
2765 * are incremented and the one that was just deleted is
2766 * reset to zero. Thus, the highest counter is the one
2767 * that was deleted longest ago. Pick that one.
2768 */
2769 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2770 if (test_bit(i, mvm->fw_key_table))
2771 continue;
2772 if (mvm->fw_key_deleted[i] > max) {
2773 max = mvm->fw_key_deleted[i];
2774 max_offs = i;
2775 }
2776 }
2777
2778 if (max_offs < 0)
2779 return STA_KEY_IDX_INVALID;
2780
2781 return max_offs;
2782}
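/*
 * Worked example (illustrative) of the aging scheme above with four
 * offsets: if fw_key_deleted == {3, 1, 0, 2} and offsets 1 and 3 are
 * currently unused, offset 3 wins (counter 2 > counter 1). Deleting a
 * key increments every counter and zeroes the slot just freed (see
 * iwl_mvm_remove_sta_key() below), so the least recently freed offset
 * always carries the highest counter.
 */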
2783
2784static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2785 struct ieee80211_vif *vif,
2786 struct ieee80211_sta *sta)
2787{
2788 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2789
2790 if (sta)
2791 return iwl_mvm_sta_from_mac80211(sta);
2792
2793 /*
2794 * The device expects GTKs for station interfaces to be
2795 * installed as GTKs for the AP station. If we have no
2796 * station ID, then use AP's station ID.
2797 */
2798 if (vif->type == NL80211_IFTYPE_STATION &&
2799 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
2800 u8 sta_id = mvmvif->ap_sta_id;
2801
2802 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2803 lockdep_is_held(&mvm->mutex));
2804
2805 /*
2806 * It is possible that the 'sta' parameter is NULL,
2807 * for example when a GTK is removed - the sta_id will then
2808 * be the AP ID, and no station was passed by mac80211.
2809 */
2810 if (IS_ERR_OR_NULL(sta))
2811 return NULL;
2812
2813 return iwl_mvm_sta_from_mac80211(sta);
2814 }
2815
2816 return NULL;
2817}
2818
2819static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2820 u32 sta_id,
2821 struct ieee80211_key_conf *key, bool mcast,
2822 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2823 u8 key_offset)
2824{
2825 union {
2826 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2827 struct iwl_mvm_add_sta_key_cmd cmd;
2828 } u = {};
2829 __le16 key_flags;
2830 int ret;
2831 u32 status;
2832 u16 keyidx;
2833 u64 pn = 0;
2834 int i, size;
2835 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2836 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2837
2838 if (sta_id == IWL_MVM_INVALID_STA)
2839 return -EINVAL;
2840
2841 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
2842 STA_KEY_FLG_KEYID_MSK;
2843 key_flags = cpu_to_le16(keyidx);
2844 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2845
2846 switch (key->cipher) {
2847 case WLAN_CIPHER_SUITE_TKIP:
2848 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
2849 if (new_api) {
2850 memcpy((void *)&u.cmd.tx_mic_key,
2851 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
2852 IWL_MIC_KEY_SIZE);
2853
2854 memcpy((void *)&u.cmd.rx_mic_key,
2855 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
2856 IWL_MIC_KEY_SIZE);
2857 pn = atomic64_read(&key->tx_pn);
2858
2859 } else {
2860 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
2861 for (i = 0; i < 5; i++)
2862 u.cmd_v1.tkip_rx_ttak[i] =
2863 cpu_to_le16(tkip_p1k[i]);
2864 }
2865 memcpy(u.cmd.common.key, key->key, key->keylen);
2866 break;
2867 case WLAN_CIPHER_SUITE_CCMP:
2868 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
2869 memcpy(u.cmd.common.key, key->key, key->keylen);
2870 if (new_api)
2871 pn = atomic64_read(&key->tx_pn);
2872 break;
2873 case WLAN_CIPHER_SUITE_WEP104:
2874 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
2875 /* fall through */
2876 case WLAN_CIPHER_SUITE_WEP40:
2877 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2878 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
2879 break;
2880 case WLAN_CIPHER_SUITE_GCMP_256:
2881 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2882 /* fall through */
2883 case WLAN_CIPHER_SUITE_GCMP:
2884 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2885 memcpy(u.cmd.common.key, key->key, key->keylen);
2886 if (new_api)
2887 pn = atomic64_read(&key->tx_pn);
2888 break;
2889 default:
2890 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2891 memcpy(u.cmd.common.key, key->key, key->keylen);
2892 }
2893
2894 if (mcast)
2895 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2896
2897 u.cmd.common.key_offset = key_offset;
2898 u.cmd.common.key_flags = key_flags;
2899 u.cmd.common.sta_id = sta_id;
2900
2901 if (new_api) {
2902 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
2903 size = sizeof(u.cmd);
2904 } else {
2905 size = sizeof(u.cmd_v1);
2906 }
2907
2908 status = ADD_STA_SUCCESS;
2909 if (cmd_flags & CMD_ASYNC)
2910 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
2911 &u.cmd);
2912 else
2913 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
2914 &u.cmd, &status);
2915
2916 switch (status) {
2917 case ADD_STA_SUCCESS:
2918 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2919 break;
2920 default:
2921 ret = -EIO;
2922 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2923 break;
2924 }
2925
2926 return ret;
2927}
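/*
 * Key material layout assumed above for TKIP - this is the standard
 * nl80211 layout, not something driver specific: a 32-byte buffer
 * where bytes 0-15 hold the cipher key, bytes 16-23 the TX MIC key
 * (NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) and bytes 24-31 the RX MIC
 * key (NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY). New-API firmware takes
 * the two MIC keys in dedicated fields; older firmware instead gets
 * the iv32/phase-1 material precomputed by mac80211.
 */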
2928
2929static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2930 struct ieee80211_key_conf *keyconf,
2931 u8 sta_id, bool remove_key)
2932{
2933 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2934
2935 /* verify the key details match the required command's expectations */
2936 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2937 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
2938 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
2939 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
2940 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
2941 return -EINVAL;
2942
2943 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
2944 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
2945 return -EINVAL;
2946
2947 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2948 igtk_cmd.sta_id = cpu_to_le32(sta_id);
2949
2950 if (remove_key) {
2951 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2952 } else {
2953 struct ieee80211_key_seq seq;
2954 const u8 *pn;
2955
2956 switch (keyconf->cipher) {
2957 case WLAN_CIPHER_SUITE_AES_CMAC:
2958 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
2959 break;
2960 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2961 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2962 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
2963 break;
2964 default:
2965 return -EINVAL;
2966 }
2967
2968 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
2969 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2970 igtk_cmd.ctrl_flags |=
2971 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
2972 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
2973 pn = seq.aes_cmac.pn;
2974 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
2975 ((u64) pn[4] << 8) |
2976 ((u64) pn[3] << 16) |
2977 ((u64) pn[2] << 24) |
2978 ((u64) pn[1] << 32) |
2979 ((u64) pn[0] << 40));
2980 }
2981
2982 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
2983 remove_key ? "removing" : "installing",
2984 igtk_cmd.sta_id);
2985
2986 if (!iwl_mvm_has_new_rx_api(mvm)) {
2987 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
2988 .ctrl_flags = igtk_cmd.ctrl_flags,
2989 .key_id = igtk_cmd.key_id,
2990 .sta_id = igtk_cmd.sta_id,
2991 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
2992 };
2993
2994 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
2995 ARRAY_SIZE(igtk_cmd_v1.igtk));
2996 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
2997 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
2998 }
2999 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3000 sizeof(igtk_cmd), &igtk_cmd);
3001}
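/*
 * Worked example (illustrative) of the receive_seq_cnt packing above:
 * mac80211 hands the IGTK PN back most-significant byte first, so
 * pn[] = {0x00, 0x00, 0x00, 0x00, 0x01, 0x02} packs to the 48-bit
 * value 0x0102, which cpu_to_le64() then lays out in the command in
 * the firmware's little-endian byte order.
 */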
3002
3003
3004static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3005 struct ieee80211_vif *vif,
3006 struct ieee80211_sta *sta)
3007{
3008 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3009
3010 if (sta)
3011 return sta->addr;
3012
3013 if (vif->type == NL80211_IFTYPE_STATION &&
3014 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3015 u8 sta_id = mvmvif->ap_sta_id;
3016 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3017 lockdep_is_held(&mvm->mutex));
3018 return sta->addr;
3019 }
3020
3021
3022 return NULL;
3023}
3024
3025static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3026 struct ieee80211_vif *vif,
3027 struct ieee80211_sta *sta,
3028 struct ieee80211_key_conf *keyconf,
3029 u8 key_offset,
3030 bool mcast)
3031{
3032 int ret;
3033 const u8 *addr;
3034 struct ieee80211_key_seq seq;
3035 u16 p1k[5];
3036 u32 sta_id;
3037
3038 if (sta) {
3039 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3040
3041 sta_id = mvm_sta->sta_id;
3042 } else if (vif->type == NL80211_IFTYPE_AP &&
3043 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3044 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3045
3046 sta_id = mvmvif->mcast_sta.sta_id;
3047 } else {
3048 IWL_ERR(mvm, "Failed to find station id\n");
3049 return -EINVAL;
3050 }
3051
3052 switch (keyconf->cipher) {
3053 case WLAN_CIPHER_SUITE_TKIP:
3054 if (vif->type == NL80211_IFTYPE_AP) {
3055 ret = -EINVAL;
3056 break;
3057 }
3058 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3059 /* get phase 1 key from mac80211 */
3060 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3061 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3062 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3063 seq.tkip.iv32, p1k, 0, key_offset);
3064 break;
3065 case WLAN_CIPHER_SUITE_CCMP:
3066 case WLAN_CIPHER_SUITE_WEP40:
3067 case WLAN_CIPHER_SUITE_WEP104:
3068 case WLAN_CIPHER_SUITE_GCMP:
3069 case WLAN_CIPHER_SUITE_GCMP_256:
3070 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3071 0, NULL, 0, key_offset);
3072 break;
3073 default:
3074 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3075 0, NULL, 0, key_offset);
3076 }
3077
3078 return ret;
3079}
3080
3081static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3082 struct ieee80211_key_conf *keyconf,
3083 bool mcast)
3084{
3085 union {
3086 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3087 struct iwl_mvm_add_sta_key_cmd cmd;
3088 } u = {};
3089 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3090 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3091 __le16 key_flags;
3092 int ret, size;
3093 u32 status;
3094
3095 if (sta_id == IWL_MVM_INVALID_STA)
3096 return -EINVAL;
3097
3098 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3099 STA_KEY_FLG_KEYID_MSK);
3100 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3101 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3102
3103 if (mcast)
3104 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3105
3106 /*
3107 * The fields assigned here are in the same location at the start
3108 * of the command, so we can do this union trick.
3109 */
3110 u.cmd.common.key_flags = key_flags;
3111 u.cmd.common.key_offset = keyconf->hw_key_idx;
3112 u.cmd.common.sta_id = sta_id;
3113
3114 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
3115
3116 status = ADD_STA_SUCCESS;
3117 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3118 &status);
3119
3120 switch (status) {
3121 case ADD_STA_SUCCESS:
3122 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3123 break;
3124 default:
3125 ret = -EIO;
3126 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3127 break;
3128 }
3129
3130 return ret;
3131}
3132
8ca151b5
JB
3133int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3134 struct ieee80211_vif *vif,
3135 struct ieee80211_sta *sta,
3136 struct ieee80211_key_conf *keyconf,
3137 u8 key_offset)
3138{
3139 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3140 struct iwl_mvm_sta *mvm_sta;
3141 u8 sta_id = IWL_MVM_INVALID_STA;
3142 int ret;
3143 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3144
3145 lockdep_assert_held(&mvm->mutex);
3146
3147 if (vif->type != NL80211_IFTYPE_AP ||
3148 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3149 /* Get the station id from the mvm local station table */
3150 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3151 if (!mvm_sta) {
3152 IWL_ERR(mvm, "Failed to find station\n");
3153 return -EINVAL;
3154 }
3155 sta_id = mvm_sta->sta_id;
3156
3157 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3158 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3159 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3160 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
3161 false);
3162 goto end;
3163 }
3164
3165 /*
3166 * It is possible that the 'sta' parameter is NULL, and thus
3167 * there is a need to retrieve the sta from the local station
3168 * table.
3169 */
3170 if (!sta) {
3171 sta = rcu_dereference_protected(
3172 mvm->fw_id_to_mac_id[sta_id],
3173 lockdep_is_held(&mvm->mutex));
3174 if (IS_ERR_OR_NULL(sta)) {
3175 IWL_ERR(mvm, "Invalid station id\n");
3176 return -EINVAL;
3177 }
3178 }
3179
3180 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3181 return -EINVAL;
3182 }
3183
3184 /* If the key_offset is not pre-assigned, we need to find a
3185 * new offset to use. In normal cases, the offset is not
3186 * pre-assigned, but during HW_RESTART we want to reuse the
3187 * same indices, so we pass them when this function is called.
3188 *
3189 * In D3 entry, we need to hardcoded the indices (because the
3190 * firmware hardcodes the PTK offset to 0). In this case, we
3191 * need to make sure we don't overwrite the hw_key_idx in the
3192 * keyconf structure, because otherwise we cannot configure
3193 * the original ones back when resuming.
3194 */
3195 if (key_offset == STA_KEY_IDX_INVALID) {
3196 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3197 if (key_offset == STA_KEY_IDX_INVALID)
3198 return -ENOSPC;
3199 keyconf->hw_key_idx = key_offset;
3200 }
3201
3202 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3203 if (ret)
3204 goto end;
3205
3206 /*
3207 * For WEP, the same key is used for multicast and unicast. Upload it
3208 * again, using the same key offset, and now pointing the other one
3209 * to the same key slot (offset).
3210 * If this fails, remove the original as well.
3211 */
3212 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3213 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3214 sta) {
3215 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3216 key_offset, !mcast);
3217 if (ret) {
3218 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3219 goto end;
3220 }
3221 }
3222
3223 __set_bit(key_offset, mvm->fw_key_table);
3224
3225end:
3226 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3227 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3228 sta ? sta->addr : zero_addr, ret);
3229 return ret;
3230}
3231
3232int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3233 struct ieee80211_vif *vif,
3234 struct ieee80211_sta *sta,
3235 struct ieee80211_key_conf *keyconf)
3236{
3237 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3238 struct iwl_mvm_sta *mvm_sta;
3239 u8 sta_id = IWL_MVM_INVALID_STA;
3240 int ret, i;
3241
3242 lockdep_assert_held(&mvm->mutex);
3243
3244 /* Get the station from the mvm local station table */
3245 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3246 if (mvm_sta)
3247 sta_id = mvm_sta->sta_id;
3248 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3249 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3250
3251
3252 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3253 keyconf->keyidx, sta_id);
3254
3255 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3256 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3257 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
3258 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3259
3260 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3261 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3262 keyconf->hw_key_idx);
3263 return -ENOENT;
3264 }
3265
3266 /* track which key was deleted last */
3267 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3268 if (mvm->fw_key_deleted[i] < U8_MAX)
3269 mvm->fw_key_deleted[i]++;
3270 }
3271 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3272
3273 if (sta && !mvm_sta) {
3274 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3275 return 0;
3276 }
3277
3278 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3279 if (ret)
3280 return ret;
3281
3282 /* delete WEP key twice to get rid of (now useless) offset */
3283 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3284 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3285 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3286
3287 return ret;
3288}
3289
3290void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3291 struct ieee80211_vif *vif,
3292 struct ieee80211_key_conf *keyconf,
3293 struct ieee80211_sta *sta, u32 iv32,
3294 u16 *phase1key)
3295{
3296 struct iwl_mvm_sta *mvm_sta;
3297 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3298
3299 rcu_read_lock();
3300
3301 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3302 if (WARN_ON_ONCE(!mvm_sta))
3303 goto unlock;
3304 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3305 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
3306
3307 unlock:
3308 rcu_read_unlock();
3309}
3310
3311void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3312 struct ieee80211_sta *sta)
3313{
3314 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3315 struct iwl_mvm_add_sta_cmd cmd = {
3316 .add_modify = STA_MODE_MODIFY,
3317 .sta_id = mvmsta->sta_id,
3318 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3319 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3320 };
3321 int ret;
3322
3323 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3324 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3325 if (ret)
3326 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3327}
3328
3329void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3330 struct ieee80211_sta *sta,
3331 enum ieee80211_frame_release_type reason,
3332 u16 cnt, u16 tids, bool more_data,
3333 bool single_sta_queue)
3334{
3335 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3336 struct iwl_mvm_add_sta_cmd cmd = {
3337 .add_modify = STA_MODE_MODIFY,
3338 .sta_id = mvmsta->sta_id,
3339 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3340 .sleep_tx_count = cpu_to_le16(cnt),
9cc40712 3341 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
8ca151b5 3342 };
3e56eadf
JB
3343 int tid, ret;
3344 unsigned long _tids = tids;
3345
3346 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3347 * Note that this field is reserved and unused by firmware not
3348 * supporting GO uAPSD, so it's safe to always do this.
3349 */
3350 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3351 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3352
9a3fcf91
SS
3353 /* If we're releasing frames from aggregation or dqa queues then check
3354 * if all the queues that we're releasing frames from, combined, have:
3e56eadf
JB
3355 * - more frames than the service period, in which case more_data
3356 * needs to be set
3357 * - fewer than 'cnt' frames, in which case we need to adjust the
3358 * firmware command (but do that unconditionally)
3359 */
9a3fcf91 3360 if (single_sta_queue) {
3e56eadf 3361 int remaining = cnt;
36be0eb6 3362 int sleep_tx_count;
3e56eadf
JB
3363
3364 spin_lock_bh(&mvmsta->lock);
3365 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3366 struct iwl_mvm_tid_data *tid_data;
3367 u16 n_queued;
3368
3369 tid_data = &mvmsta->tid_data[tid];
3e56eadf 3370
dd32162d 3371 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3e56eadf
JB
3372 if (n_queued > remaining) {
3373 more_data = true;
3374 remaining = 0;
3375 break;
3376 }
3377 remaining -= n_queued;
3378 }
36be0eb6
EG
3379 sleep_tx_count = cnt - remaining;
3380 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3381 mvmsta->sleep_tx_count = sleep_tx_count;
3e56eadf
JB
3382 spin_unlock_bh(&mvmsta->lock);
3383
36be0eb6 3384 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3e56eadf
JB
3385 if (WARN_ON(cnt - remaining == 0)) {
3386 ieee80211_sta_eosp(sta);
3387 return;
3388 }
3389 }
3390
3391 /* Note: this is ignored by firmware not supporting GO uAPSD */
3392 if (more_data)
ced19f26 3393 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3e56eadf
JB
3394
3395 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3396 mvmsta->next_status_eosp = true;
ced19f26 3397 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3e56eadf 3398 } else {
ced19f26 3399 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3e56eadf 3400 }
8ca151b5 3401
156f92f2
EG
3402 /* block the Tx queues until the FW updated the sleep Tx count */
3403 iwl_trans_block_txq_ptrs(mvm->trans, true);
3404
3405 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3406 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
854c5705 3407 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
8ca151b5
JB
3408 if (ret)
3409 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3410}
3e56eadf 3411
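/*
 * Rx handler for the firmware's end-of-service-period notification:
 * translate the firmware station id back to a mac80211 station under
 * RCU and forward the EOSP indication via ieee80211_sta_eosp().
 */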
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

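/*
 * Flip the firmware-side Tx disable bit for one station.  As with the
 * PS wake helper above, station_flags_msk selects STA_FLG_DISABLE_TX
 * and station_flags carries the new value, so the same command both
 * sets and clears the flag depending on 'disable'.
 */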
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

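/*
 * Driver-level wrapper used on the AP side: besides updating the
 * firmware flag, it asks mac80211 (via ieee80211_sta_block_awake()) to
 * start or stop buffering Tx for the station, and caches the state in
 * mvm_sta->disable_tx so repeated calls with the same value are no-ops.
 */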
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

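/*
 * Same firmware update for the driver's internal (broadcast/multicast)
 * stations, which have no mac80211 counterpart; the mac id/color is
 * taken from the owning vif instead of a struct iwl_mvm_sta.
 */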
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

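/*
 * Block or unblock every station that belongs to one vif, e.g. around a
 * channel-switch quiet period.  A sketch of the assumed usage (caller
 * and context are illustrative):
 *
 *	mutex_lock(&mvm->mutex);
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
 *	... perform the channel switch ...
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
 *	mutex_unlock(&mvm->mutex);
 */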
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

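/*
 * On a client vif, mark the AP station as absent for the duration of a
 * channel switch: this only sets the disable-Tx flag, leaving the
 * unblock to a later caller (presumably via
 * iwl_mvm_sta_modify_disable_tx() once the switch completes).
 */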
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

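/*
 * Number of frames still queued for one TID, computed as the distance
 * between the driver's Tx sequence number and the last sequence number
 * the firmware reclaimed.
 */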
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In A000 HW, the next_reclaimed index is only 8 bits wide, so we
	 * need to align the wrap-around of the SSN to make sure we compare
	 * relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
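
/*
 * iwl_mvm_tid_queued() is what iwl_mvm_sta_modify_sleep_tx_count() above
 * uses to decide whether more frames than the service period remain
 * queued.  A minimal sketch of that pattern (assuming a valid tid_data
 * entry; names and values are illustrative):
 *
 *	u16 n_queued = iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]);
 *	if (n_queued > remaining)
 *		more_data = true;
 */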