/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"
/*
 * New version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
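
/*
 * A sketch of the calling pattern (used throughout this file): the
 * computed size is handed to the command-sending helpers, so a v7
 * firmware is never sent the newer trailing fields:
 *
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					  iwl_mvm_add_sta_cmd_size(mvm),
 *					  &add_sta_cmd, &status);
 */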
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
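
/*
 * The bitmap returned above feeds iwl_mvm_invalidate_sta_queue(): when a
 * queue is about to be shared, any TID that currently has an active
 * aggregation on it is torn down first (see iwl_mvm_sta_alloc_queue()).
 */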
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[queue].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}
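
/*
 * A note on inc_ssn in iwl_mvm_sta_alloc_queue() above: enabling the
 * queue may bump the SSN by one, in which case both the pending frame's
 * seq_ctrl and the driver's cached seq_number advance by 0x10 - one
 * MSDU, since the four LSBs of seq_ctrl hold the fragment number.
 */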
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
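
/*
 * A rough picture of the flow above (a sketch, not a firmware-mandated
 * ordering): the TX path defers frames for a TID that has no queue yet
 * and schedules this worker; the worker first reclaims inactive queues,
 * then un-shares / re-owns queues that need it, and finally allocates
 * queues and transmits the deferred frames.
 */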
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}
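
/*
 * Note on the helper above: with the new TX API (TVQM) the firmware
 * allocates the queue number itself, so the caller's queue variable is
 * written back; with the older SCD scheduler the preconfigured queue
 * number is programmed as-is.
 */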
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
		mvmvif->cab_queue = vif->cab_queue;
	}

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);

	return 0;
}
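/*
 * The template address _maddr above (03:00:00:00:00:00) has both the
 * group (I/G) bit and the locally-administered (U/L) bit set in its
 * first octet, i.e. it is a multicast address; the internal station is
 * created under it and tied to this MAC via mvmvif->id/color in the
 * ADD_STA command.
 */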
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16
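/*
 * The firmware supports only a limited number of simultaneous RX
 * block-ack sessions; mvm->rx_ba_sessions counts them across all vifs
 * and iwl_mvm_sta_rx_agg() below refuses to start a new session once
 * this cap is reached.
 */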
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}
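/*
 * Each RX queue gets its own slice of the shared entries array: queue i
 * owns entries[i * entries_per_queue .. (i + 1) * entries_per_queue - 1],
 * so the per-queue receive paths never touch each other's frames and
 * only the per-buffer spinlock is needed for serialization.
 */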
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
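/*
 * Illustrative numbers for the reorder allocation above (the sizes are
 * assumptions for the example only): with a 64-byte cache line, a
 * 32-byte iwl_mvm_reorder_buf_entry and buf_size = 25, the raw size is
 * 800 bytes, ALIGN(800, 64) = 832, so entries_per_queue = 832 / 32 = 26
 * and each queue's slice starts on its own cache line; the
 * BUILD_BUG_ON() is what guarantees the final division is exact.
 */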
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE,
	IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};
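/*
 * Both tables follow the standard IEEE 802.11 UP-to-AC mapping: TIDs 1
 * and 2 map to background, 0 and 3 to best effort, 4 and 5 to video,
 * and 6 and 7 (plus the driver-internal management TID 8) to voice.
 */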
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *     it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
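/*
 * Example of the gen2 normalization above: if tid_data->ssn is 0x143
 * and next_reclaimed is 0x43, the full 16-bit values differ, but the
 * hardware index is only 8 bits wide, so after the &= 0xff both compare
 * as 0x43 and the queue is correctly treated as drained, letting the
 * session move straight to IWL_AGG_STARTING.
 */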
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it will be offloaded to firmware..
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}
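/*
 * Example for the buffer-size clamp above: if an earlier session set
 * max_agg_bufsize to 64 and the peer now negotiates buf_size = 32, the
 * station-wide limit drops to 32 and is pushed to the rate scaling
 * table via the LQ command, since the ucode supports only one
 * aggregation frame-count limit per station.
 */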
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	spin_unlock_bh(&mvm->queue_info_lock);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
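/*
 * Example of the aging scheme above: with three free offsets and
 * fw_key_deleted = {0: 5, 1: 0, 2: 3} right after offset 1 was
 * deleted, an allocation picks offset 0, the one deleted longest ago,
 * presumably so the most recently freed slot is given the most time
 * before being reused for a new key.
 */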
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
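/*
 * The receive_seq_cnt assembly above converts the 6-byte IPN, which
 * mac80211 stores most-significant-byte first (pn[0] is the high
 * byte), into the little-endian 48-bit counter the firmware expects:
 * e.g. an IPN of 00:00:00:00:01:02 becomes the value 0x0102.
 */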
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (vif->type == NL80211_IFTYPE_AP) {
			ret = -EINVAL;
			break;
		}
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
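/*
 * Worked example for the accounting above: with cnt = 4 and frames
 * queued per TID of {tid 2: 1, tid 5: 10}, tid 2 leaves remaining = 3,
 * tid 5 exceeds that, so more_data is set and remaining drops to 0;
 * the firmware is then told sleep_tx_count = 4 and the MORE_DATA
 * indication to the peer stays accurate.
 */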
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
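/*
 * Example: on gen2, with seq_number giving sn = 0x112 and
 * next_reclaimed = 0x08, sn is first masked to 0x12 and
 * ieee80211_sn_sub() then yields 10 pending frames, the correct
 * distance once the sequence index is reduced to the 8 bits the
 * hardware actually tracks.
 */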