/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
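
/*
 * Find the first station ID that is neither in use nor reserved. Must be
 * called with mvm->mutex held; returns IWL_MVM_INVALID_STA when the
 * firmware station table is full.
 */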
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
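
/* Return a bitmap of the TIDs on this queue that currently have an
 * aggregation session open (state IWL_AGG_ON). */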
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[queue].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}
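
/* On the TVQM (new TX API) path the firmware picks the queue, so allocation
 * only needs to enable a queue for this sta/tid and remember its ID. */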
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
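
/*
 * Allocate (or share) a TXQ for the given sta/AC/TID on the pre-TVQM DQA
 * path: prefer a free MGMT/DATA queue, fall back to the station's reserved
 * queue, then to re-using an inactive queue, and finally to sharing an
 * existing DATA queue.
 */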
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}
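
/* Re-assign the "owner" TID of a shared queue after its current owner TID
 * was removed, by sending an SCD_QUEUE_CFG update to the firmware. */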
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
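
/* Worker that reconfigures shared/orphaned queues and allocates queues for
 * stations that have deferred TX traffic waiting. */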
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
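
/* Reserve a data queue for a new station so that adding it cannot fail
 * later due to a (possibly only apparent) queue shortage. */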
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
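
/* Ask the firmware to start (or stop) draining frames for this station by
 * toggling STA_FLG_DRAIN_FLOW. */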
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    IWL_MAX_TID_COUNT,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE)) {
		/*
		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
		 * invalid, so make sure we use the queue we want.
		 * Note that this is done here as we want to avoid making DQA
		 * changes in mac80211 layer.
		 */
		if (vif->type == NL80211_IFTYPE_ADHOC) {
			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
			mvmvif->cab_queue = vif->cab_queue;
		}
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
	}

	return 0;
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    IWL_MAX_TID_COUNT, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
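/*
 * Tear down the per-queue reorder buffers of an RX BA session: notify all RX
 * queues about the delBA, drop any frames still held in the buffers and make
 * sure the reorder timer cannot re-arm afterwards.
 */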
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
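/*
 * Initialize the reorder buffer of every RX queue for a new BA session:
 * reset the stored-frame count, seed the head SN with the ADDBA SSN and set
 * up the per-buffer reorder timer, lock and frame queues.
 */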
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}
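/*
 * Start or stop an RX block-ack (BA) session for the given station/TID:
 * allocate the BAID data and reorder buffers (new RX API only), send the
 * ADD_STA command to the firmware and publish the BAID mapping under RCU.
 */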
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
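/*
 * Update the firmware's view of a TX aggregation session: adjust the
 * station's TFD queue mask and per-TID aggregation-disable bits and send the
 * corresponding ADD_STA command.
 */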
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
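/*
 * The TID to AC mapping below follows the standard 802.11 UP-to-AC rules
 * (TIDs 0/3 -> BE, 1/2 -> BK, 4/5 -> VI, 6/7 -> VO); the extra entry maps
 * the driver-internal management TID (8) to AC_VO.
 */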
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE,
	IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 * Don't support case 3 for new TX path as it is not expected to happen
	 * and aggregation will be offloaded soon anyway
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (txq_id == IWL_MVM_INVALID_QUEUE) {
			ret = -ENXIO;
			goto release_locks;
		}
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/*
		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
		 * an inactive queue from iwl_mvm_find_free_queue() is
		 * certainly a bug
		 */
		WARN_ON(mvm->queue_info[txq_id].status ==
			IWL_MVM_QUEUE_INACTIVE);

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it will be offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}
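/*
 * If the queue was only reserved for aggregation (never actually enabled),
 * return it to the free pool. With the new TX API there is nothing to do
 * here, so the function returns early.
 */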
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					u16 txq_id)
{
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}
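/*
 * Pick a free firmware key offset. As an illustrative (hypothetical) example:
 * with three offsets, offsets 0 and 2 unused and fw_key_deleted = {1, 4, 6},
 * offset 2 is chosen since it has the highest 'deleted' count among the
 * unused offsets, i.e. it was freed longest ago.
 */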
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
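/*
 * Build and send an ADD_STA_KEY command for a unicast or multicast key.
 * Depending on the TKIP_MIC_KEYS API TLV, either the new command layout
 * (MIC keys and PN carried in the command) or the v1 layout (phase-1 TKIP
 * key material) is used; the common fields sit at the same offsets in both.
 */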
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
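/*
 * Return the MAC address to use for TKIP phase-1 key derivation: the
 * station's own address when one is given, otherwise the AP station's
 * address on a client interface (GTK case), or NULL if neither applies.
 */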
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (vif->type == NL80211_IFTYPE_AP) {
			ret = -EINVAL;
			break;
		}
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
			ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
						    false);
			goto end;
		}

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
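/*
 * Handle the EOSP (end of service period) notification from the firmware:
 * look up the station by its firmware id and forward the event to mac80211.
 */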
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
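/*
 * Number of frames still pending on a TID, i.e. the distance between the
 * last assigned sequence number and the next one to be reclaimed. On 22000
 * (gen2) hardware the next_reclaimed index is only 8 bits wide, so the
 * sequence number is masked accordingly before the subtraction.
 */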
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}