/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);
/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);

	return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
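/*
 * Find a free station index in the fw_id_to_mac_id table. Must be called
 * with mvm->mutex held; index 0 is kept for the AP station of a station
 * vif, since the d0i3/d3 flows assume it lives there.
 */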
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
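/*
 * RX BA session inactivity timer. If the last RX was recent enough the
 * timer is re-armed; otherwise mac80211 is notified so it can tear the
 * BA agreement down.
 */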
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
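/*
 * Return a bitmap of the TIDs mapped on @queue that currently have an
 * aggregation session open (IWL_AGG_ON).
 */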
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}
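/*
 * Free an inactive queue: tear down any open aggregations on it, disable
 * the queue and, if it served another station, tell the firmware to
 * remove it from that station's queue set.
 */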
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
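/*
 * Pick an already-allocated DATA queue of this station to share for a
 * new TID, preferring (in descending order) BE, the same AC, and then
 * the closest other AC.
 */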
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[queue].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}
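/* Allocate a TVQM (new TX path) queue for the given station and TID */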
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
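/*
 * Allocate a TXQ for the given station/TID in DQA mode: prefer a MGMT
 * queue for non-QoS frames, then the station's reserved queue, then any
 * free DATA queue, and finally fall back to sharing an existing queue.
 */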
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
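/*
 * Transmit (or free, if no queue could be allocated) the frames that were
 * deferred for this station/TID while no TXQ was assigned to it.
 */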
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
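/*
 * Worker that reconfigures shared/orphaned TXQs and then allocates queues
 * for all stations with deferred traffic pending.
 */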
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
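/*
 * Reserve a DATA queue for a new station, preferring the dedicated BSS
 * client queue for a non-TDLS station interface.
 */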
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
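/*
 * Add a mac80211 station to the firmware: allocate a sta_id (or reuse it
 * on HW restart), initialize the driver state and send the ADD_STA command.
 */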
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
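/*
 * Tell the firmware to start or stop draining frames queued for this
 * station, e.g. while it is being removed.
 */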
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
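/* Enable the aux/sniffer queue on either the old or the new TX path */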
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
		mvmvif->cab_queue = vif->cab_queue;
	}

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);

	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}
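
/*
 * Layout note (illustrative): the reorder entries for all RX queues live
 * in one allocation, sliced per queue.  With entries_per_queue == E,
 * queue i owns data->entries[i * E] .. data->entries[i * E + E - 1];
 * the ALIGN() in iwl_mvm_sta_rx_agg() below rounds E up so that each
 * queue's slice starts on its own cache line.
 */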
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
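
/*
 * Worked example for the sizing above (illustrative numbers): with
 * buf_size == 64 and sizeof(entries[0]) == 16 on a machine where
 * SMP_CACHE_BYTES == 64,
 *
 *	reorder_buf_size  = 64 * 16 = 1024;
 *	reorder_buf_size  = ALIGN(1024, 64) = 1024;  // already aligned
 *	entries_per_queue = 1024 / 16 = 64;
 *
 * If the product were not a cache-line multiple, ALIGN() would round it
 * up and entries_per_queue would grow past buf_size, padding each
 * queue's slice out to a cache-line boundary.
 */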
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
	AC_VO,
};
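
/*
 * Example (illustrative): a voice frame on TID 6 maps to
 * tid_to_mac80211_ac[6] == IEEE80211_AC_VO for mac80211 bookkeeping and
 * tid_to_ucode_ac[6] == AC_VO when building uAPSD-related firmware
 * commands; both tables follow the standard 802.11 TID-to-AC mapping,
 * with entry 8 reserved for management frames.
 */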
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
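
/*
 * Illustrative wrap-around case for normalized_ssn above: on gen2 HW,
 * with tid_data->ssn == 0x112 and the 8-bit next_reclaimed == 0x12,
 * masking gives 0x112 & 0xff == 0x12, so the queue is correctly treated
 * as drained even though the full 12-bit sequence numbers differ.
 */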
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}
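
/*
 * Design note (summarizing the flow above): on the old TX API the SCD
 * window only needs a reconfig when the negotiated buf_size shrinks
 * below IWL_FRAME_LIMIT on an already-enabled queue; growing past the
 * default is not possible because the firmware caps the window per
 * station, which is also why max_agg_bufsize is taken as a min() rather
 * than simply replaced.
 */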
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	spin_unlock_bh(&mvm->queue_info_lock);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
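
/*
 * Illustrative run of the selection above: with offsets 0 and 1 set in
 * fw_key_table and fw_key_deleted = {-, -, 3, 0, 7, ...}, offset 4 wins
 * because it has stayed unused through the most deletions (7),
 * presumably giving any in-flight frames that still reference a stale
 * offset the longest grace period before the slot is recycled.
 */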
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
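
/*
 * Byte-order example for receive_seq_cnt above: mac80211 stores the BIP
 * packet number big-endian (pn[0] is the most significant byte), while
 * the firmware wants a little-endian 48-bit counter, so for
 * pn = {0x00, 0x00, 0x00, 0x00, 0x01, 0x02} the shifts assemble the
 * value 0x0102 before the final cpu_to_le64().
 */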
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
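
/*
 * Usage note for the WEP path above: because the firmware looks a key up
 * per station and traffic direction, a single WEP key ends up installed
 * twice - once with the mcast flag and once without - but both uploads
 * share one key_offset, which is also why iwl_mvm_remove_sta_key() below
 * has to delete it twice.
 */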
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;


	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
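
/*
 * Arithmetic example for sleep_tx_count above (illustrative numbers):
 * if the service period allows cnt = 4 frames and the released TIDs
 * hold 1 and 2 queued frames, remaining ends at 4 - 3 = 1 and
 * sleep_tx_count = 3, so the firmware releases exactly what is queued.
 * Had one TID held 10 frames instead, more_data would be set, remaining
 * zeroed, and the full cnt of 4 used.
 */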
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}