/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR sent to %pM, tid %d, ssn %d",
				    addr, tid, ssn);
}
#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info)
{
	u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it anyway.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/* mac header len should include IV, size is in words */
	if (info->control.hw_key)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
	return offload_assist;
}
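/*
 * Illustration (assuming a plain IPv4/TCP MPDU with a 26-byte QoS MAC
 * header and no IV): the word built above ends up with
 * BIT(TX_CMD_OFFLD_L4_EN) set, an IP-header offset of 4 words (the
 * 8-byte SNAP header) and MH_SIZE = 26 / 2 = 13 words.
 */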
/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			tx_cmd->offload_assist |=
				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));

	tx_cmd->offload_assist |=
		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info));
}
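/*
 * Illustration: with mac80211's definitions (the TID info sits in bits
 * 12-15 of the BAR control field), a BAR control word of 0x3004 yields
 * tid_tspec = (0x3004 & 0xf000) >> 12 = 3 in the back-req branch above.
 */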
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
{
	u32 rate_flags;
	int rate_idx;
	u8 rate_plcp;

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHZ band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
	else
		rate_flags =
			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	return (u32)rate_plcp | rate_flags;
}
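/*
 * Illustration (assuming the driver's usual legacy table layout): in
 * 5 GHz there are no CCK entries, so mac80211 index 0 means 6 Mbps OFDM;
 * adding IWL_FIRST_OFDM_RATE above shifts it past the CCK entries of the
 * driver's combined CCK+OFDM legacy rate table.
 */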
/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses*/
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */

	if (ieee80211_is_data(fc) && sta) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
		return;
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}
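/*
 * Illustration of the header bytes built above, for pn = 0x0102030405 and
 * keyidx = 1:
 *	crypto_hdr[0] = 0x05, crypto_hdr[1] = 0x04	(PN0, PN1)
 *	crypto_hdr[2] = 0x00, crypto_hdr[3] = 0x60	(reserved, ExtIV | keyidx << 6)
 *	crypto_hdr[4..7] = 0x03, 0x02, 0x01, 0x00	(PN2..PN5)
 */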
/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			 TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having an old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * key.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}
/*
 * Allocates and sets the Tx cmd the driver data pointers in the skb
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = TX_CMD;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
		u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);

		/* padding is inserted later in transport */
		/* FIXME - check for AMSDU may need to be removed */
		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
			offload_assist |= BIT(TX_CMD_OFFLD_PAD);

		cmd->offload_assist |= cpu_to_le16(offload_assist);

		/* Total # bytes to be transmitted */
		cmd->len = cpu_to_le16((u16)skb->len);

		/* Copy MAC header from skb into command buffer */
		memcpy(cmd->hdr, hdr, hdrlen);

		if (!info->control.hw_key)
			cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);

		/* For data packets rate info comes from the fw */
		if (ieee80211_is_data(hdr->frame_control) && sta)
			goto out;

		cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
		cmd->rate_n_flags =
			cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));

		goto out;
	}

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
	return dev_cmd;
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = cmd;
}
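/*
 * Note: driver_data[1] is read back on the Tx response path in this file,
 * where the command is released with iwl_trans_free_tx_cmd(mvm->trans,
 * info->driver_data[1]) once the frame has been reclaimed.
 */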
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info, __le16 fc)
{
	if (!iwl_mvm_is_dqa_supported(mvm))
		return info->hw_queue;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Handle legacy hostapd as well, where station may be added
		 * only after assoc. Take care of the case where we send a
		 * deauth to a station that we don't have.
		 */
		if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
		    ieee80211_is_deauth(fc))
			return mvm->probe_queue;
		if (info->hw_queue == info->control.vif->cab_queue)
			return info->hw_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "fc=0x%02x", le16_to_cpu(fc));
		return mvm->probe_queue;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;
		if (info->hw_queue == info->control.vif->cab_queue)
			return info->hw_queue;

		return mvm->p2p_dev_queue;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_info info;
	struct iwl_device_cmd *dev_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	int queue;

	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
	 * and hence needs to be sent on the aux queue
	 */
	if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    skb_info->control.vif->type == NL80211_IFTYPE_STATION)
		skb_info->hw_queue = mvm->aux_queue;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info.control.vif ||
			  info.hw_queue != info.control.vif->cab_queue)))
		return -1;

	queue = info.hw_queue;

	/*
	 * If the interface on which the frame is sent is the P2P_DEVICE
	 * or an AP/GO interface use the broadcast station associated
	 * with it; otherwise if the interface is a managed interface
	 * use the AP station associated with it for multicast traffic
	 * (this is not possible for unicast packets as a TLDS discovery
	 * response are sent without a station entry); otherwise use the
	 * AUX station.
	 * In DQA mode, if vif is of type STATION and frames are not multicast
	 * or offchannel, they should be sent from the BSS queue.
	 * For example, TDLS setup frames should be sent on this queue,
	 * as they go through the AP.
	 */
	sta_id = mvm->aux_sta.sta_id;
	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP ||
		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
			sta_id = mvmvif->bcast_sta.sta_id;
			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
							   hdr->frame_control);
			if (queue < 0)
				return -1;

			if (queue == info.control.vif->cab_queue)
				queue = mvmvif->cab_queue;
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   is_multicast_ether_addr(hdr->addr1)) {
			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

			if (ap_sta_id != IWL_MVM_INVALID_STA)
				sta_id = ap_sta_id;
		} else if (iwl_mvm_is_dqa_supported(mvm) &&
			   info.control.vif->type == NL80211_IFTYPE_STATION &&
			   queue != mvm->aux_queue) {
			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
		}
	}

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	/*
	 * Increase the pending frames counter, so that later when a reply comes
	 * in and the counter is decreased - we don't start getting negative
	 * values.
	 * Note that we don't need to make sure it isn't agg'd, since we're
	 * TXing non-sta.
	 * For DQA mode - we shouldn't increase it though
	 */
	if (!iwl_mvm_is_dqa_supported(mvm))
		atomic_inc(&mvm->pending_frames[sta_id]);

	return 0;
}
#ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	struct sk_buff *tmp, *next;
	char cb[sizeof(skb->cb)];
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
	u16 snap_ip_tcp, pad, i = 0;
	unsigned int dbg_max_amsdu_len;
	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 *qc, tid, txf;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);

	if (!sta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * ask stack to segment and checkum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		num_subframes = 1;
		pad = 0;
		netdev_features &= ~NETIF_F_CSUM_MASK;
		goto segment;
	}

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during an BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	max_amsdu_len = sta->max_amsdu_len;

	/* the Tx FIFO to which this A-MSDU will be routed */
	txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
			      mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);

	if (unlikely(dbg_max_amsdu_len))
		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
				      dbg_max_amsdu_len);

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
	if (num_subframes > 1)
		*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	num_subframes =
		min_t(unsigned int, num_subframes,
		      (mvm->trans->max_skb_frags - 1 -
		       skb_shinfo(skb)->nr_frags) / 2);

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
segment:
	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_features);
	skb_shinfo(skb)->gso_size = mss;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	while (next) {
		tmp = next;
		next = tmp->next;

		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n -1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			qc = ieee80211_get_qos_ctl((void *)tmp->data);

			if (ipv4)
				ip_send_check(ip_hdr(tmp));
			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			skb_shinfo(tmp)->gso_size = 0;
		}

		tmp->prev = NULL;
		tmp->next = NULL;

		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO with CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif
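/*
 * Sizing illustration for the A-MSDU arithmetic above (example numbers
 * only): with mss = 1460 and 20-byte IPv4 + 20-byte TCP headers,
 * snap_ip_tcp = 8 + 20 + 20 = 48, so subf_len = 14 + 48 + 1460 = 1522 and
 * pad = (4 - 1522) & 0x3 = 2. For max_amsdu_len = 7935 this gives
 * num_subframes = (7935 + 2) / (1522 + 2) = 5.
 */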
static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta, u8 tid,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 mac_queue = info->hw_queue;
	struct sk_buff_head *deferred_tx_frames;

	lockdep_assert_held(&mvm_sta->lock);

	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

	skb_queue_tail(deferred_tx_frames, skb);

	/*
	 * The first deferred frame should've stopped the MAC queues, so we
	 * should never get a second deferred frame for the RA/TID.
	 */
	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
		  skb_queue_len(deferred_tx_frames))) {
		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
		schedule_work(&mvm->add_stream_wk);
	}
}
/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}
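/*
 * Illustration: time_before(last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT,
 * now) is true exactly when the TID has been idle for longer than the
 * timeout, so the helper above reports "should update" only for stale TIDs.
 */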
/*
 * Prepares a single MPDU (sequence number, queue selection) and hands it
 * to the transport layer for Tx
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u16 txq_id = info->hw_queue;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	/* nullfunc frames should go to the MGMT queue regardless of QOS,
	 * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
	 * assignment of MGMT TID
	 */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
		if (WARN_ON_ONCE(is_ampdu &&
				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(seq_number);
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
	}

	if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
		txq_id = mvmsta->tid_data[tid].txq_id;

	if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
		/* default to TID 0 for non-QoS packets */
		u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;

		txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
	}

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	/* Check if TXQ needs to be allocated or re-activated */
	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
		     !mvmsta->tid_data[tid].is_tid_active) &&
	    iwl_mvm_is_dqa_supported(mvm)) {
		/* If TXQ needs to be allocated... */
		if (txq_id == IWL_MVM_INVALID_QUEUE) {
			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

			/*
			 * The frame is now deferred, and the worker scheduled
			 * will re-allocate it, so we can free it for now.
			 */
			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
			spin_unlock(&mvmsta->lock);
			return 0;
		}

		/* queue should always be active in new TX path */
		WARN_ON(iwl_mvm_has_new_tx_api(mvm));

		/* If we are here - TXQ exists and needs to be re-activated */
		spin_lock(&mvm->queue_info_lock);
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		mvmsta->tid_data[tid].is_tid_active = true;
		spin_unlock(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
				    txq_id);
	}

	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that the mvm->queue_info_lock isn't being taken here in
		 * order to not serialize the TX flow. This isn't dangerous
		 * because scheduling mvm->add_stream_wk can't ruin the state,
		 * and if we DON'T schedule it due to some race condition then
		 * next TX we get here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	/* Increase pending frames count if this isn't AMPDU or DQA queue */
	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}
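/*
 * Note on the seq_number bookkeeping above: the 802.11 sequence number
 * lives in bits 4-15 of seq_ctrl (IEEE80211_SCTL_SEQ == 0xfff0), so
 * adding 0x10 advances the sequence number by exactly one while leaving
 * the fragment-number bits untouched.
 */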
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
		return ret;

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;
}
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
	     iwl_mvm_is_dqa_supported(mvm)) &&
	    iwl_mvm_tid_queued(tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	if (tid_data->ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		if (!iwl_mvm_is_dqa_supported(mvm)) {
			u8 mac80211_ac = tid_to_mac80211_ac[tid];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id,
					    vif->hw_queue[mac80211_ac], tid,
					    CMD_ASYNC);
		}
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	default:
		return "UNKNOWN";
	}
#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}
1249 static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags
,
1250 struct ieee80211_tx_info
*info
)
1252 struct ieee80211_tx_rate
*r
= &info
->status
.rates
[0];
1254 info
->status
.antenna
=
1255 ((rate_n_flags
& RATE_MCS_ANT_ABC_MSK
) >> RATE_MCS_ANT_POS
);
1256 iwl_mvm_hwrate_to_tx_rate(rate_n_flags
, info
->band
, r
);
1259 static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm
*mvm
,
1262 struct iwl_fw_dbg_trigger_tlv
*trig
;
1263 struct iwl_fw_dbg_trigger_tx_status
*status_trig
;
1266 if (!iwl_fw_dbg_trigger_enabled(mvm
->fw
, FW_DBG_TRIGGER_TX_STATUS
))
1269 trig
= iwl_fw_dbg_get_trigger(mvm
->fw
, FW_DBG_TRIGGER_TX_STATUS
);
1270 status_trig
= (void *)trig
->data
;
1272 if (!iwl_fw_dbg_trigger_check_stop(mvm
, NULL
, trig
))
1275 for (i
= 0; i
< ARRAY_SIZE(status_trig
->statuses
); i
++) {
1276 /* don't collect on status 0 */
1277 if (!status_trig
->statuses
[i
].status
)
1280 if (status_trig
->statuses
[i
].status
!= (status
& TX_STATUS_MSK
))
1283 iwl_mvm_fw_dbg_collect_trig(mvm
, trig
,
1284 "Tx status %d was received",
1285 status
& TX_STATUS_MSK
);
/**
 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @tx_resp: the Tx response from the fw (agg or non-agg)
 *
 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
 * it can't know that everything will go well until the end of the AMPDU, it
 * can't know in advance the number of MPDUs that will be sent in the current
 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
 * Hence, it can't know in advance what the SSN of the SCD will be at the end
 * of the batch. This is why the SSN of the SCD is written at the end of the
 * whole struct at a variable offset. This function knows how to cope with the
 * variable offset and returns the SSN of the SCD.
 */
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
				      struct iwl_mvm_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
			    tx_resp->frame_count) & 0xfff;
}
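/*
 * Illustration: each agg_tx_status entry is one __le32 (status + sequence),
 * so for frame_count = 3 the helper above reads the __le32 located 12 bytes
 * past the start of the status array and masks it to the 12-bit SSN.
 */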
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct ieee80211_sta *sta;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	struct agg_tx_status *agg_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	u32 status = le16_to_cpu(agg_status->status);
	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	u16 next_reclaimed, seq_ctl;
	bool is_ndp = false;

	__skb_queue_head_init(&skbs);

	if (iwl_mvm_has_new_tx_api(mvm))
		txq_id = le16_to_cpu(tx_resp->v6.tx_queue);

	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		skb_freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		case TX_STATUS_FAIL_DEST_PS:
			/* In DQA, the FW should have stopped the queue and not
			 * return this status
			 */
			WARN_ON(iwl_mvm_is_dqa_supported(mvm));
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			break;
		default:
			break;
		}

		iwl_mvm_tx_status_check_trigger(mvm, status);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
					    info);
		info->status.status_driver_data[1] =
			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);

		/* Single frame failure in an AMPDU queue => send BAR */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* W/A FW bug: seq_ctl is wrong when the status isn't success */
		if (status != TX_STATUS_SUCCESS) {
			struct ieee80211_hdr *hdr = (void *)skb->data;
			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
		}

		if (unlikely(!seq_ctl)) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			/*
			 * If it is an NDP, we can't update next_reclaim since
			 * its sequence control is 0. Note that for that same
			 * reason, NDPs are never sent to A-MPDU'able queues
			 * so that we can never have more than one freed frame
			 * for a single Tx resonse (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		info->status.status_driver_data[0] =
			(void *)(uintptr_t)tx_resp->reduced_tpc;

		ieee80211_tx_status(mvm->hw, skb);
	}

	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
		/* If this is an aggregation queue, we use the ssn since:
		 * ssn = wifi seq_num % 256.
		 * The seq_ctl is the sequence control of the packet to which
		 * this Tx response relates. But if there is a hole in the
		 * bitmap of the BA we received, this Tx response may allow to
		 * reclaim the hole and all the subsequent packets that were
		 * already acked. In that case, seq_ctl != ssn, and the next
		 * packet to be reclaimed will be ssn and not seq_ctl. In that
		 * case, several packets will be reclaimed even if
		 * frame_count = 1.
		 *
		 * The ssn is the index (% 256) of the latest packet that has
		 * treated (acked / dropped) + 1.
		 */
		next_reclaimed = ssn;
	} else {
		/* The next packet to be reclaimed is the one after this one */
		next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
	}

	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	} else {
		mvmsta = NULL;
	}

	/*
	 * If the txq is not an AMPDU queue, there is no chance we freed
	 * several skbs. Check that out...
	 */
	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
		goto out;

	/* We can't free more than one frame at once on a shared queue */
	WARN_ON(skb_freed > 1);

	/* If we have still frames for this STA nothing to do here */
	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
		goto out;

	if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
		/*
		 * If there are no pending frames for this STA and
		 * the tx to this station is not disabled, notify
		 * mac80211 that this station can now wake up in its
		 * STA table.
		 * If mvmsta is not NULL, sta is valid.
		 */

		spin_lock_bh(&mvmsta->lock);

		if (!mvmsta->disable_tx)
			ieee80211_sta_block_awake(mvm->hw, sta, false);

		spin_unlock_bh(&mvmsta->lock);
	}

	if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
		/*
		 * We are draining and this was the last packet - pre_rcu_remove
		 * has been called already. We might be after the
		 * synchronize_net already.
		 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
		 */
		set_bit(sta_id, mvm->sta_drained);
		schedule_work(&mvm->sta_drained_wk);
	}

out:
	rcu_read_unlock();
}
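/*
 * Illustration of the non-aggregation branch above: for seq_ctl = 0x0120
 * (sequence number 18), IEEE80211_SEQ_TO_SN(0x0120 + 0x10) = 19, i.e. the
 * frame right after the one this Tx response refers to.
 */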
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(LAST_SENT_TTL);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct iwl_mvm_sta *mvmsta;
	int queue = SEQ_TO_QUEUE(sequence);

	if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
			 (!iwl_mvm_is_dqa_supported(mvm) ||
			  (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
		return;

	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
	}

	rcu_read_unlock();
}
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
			       int txq, int index,
			       struct ieee80211_tx_info *ba_info, u32 rate)
{
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff *skb;
	int freed;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
		      tid >= IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted ? */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != txq) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d\n",
			tid_data->txq_id, tid);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&mvmsta->lock);

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

	tid_data->next_reclaimed = index;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;
	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
		if (freed == 1) {
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			memcpy(&info->status, &ba_info->status,
			       sizeof(ba_info->status));
			iwl_mvm_hwrate_to_tx_status(rate, info);
		}
	}

	spin_unlock_bh(&mvmsta->lock);

	/* We got a BA notif with 0 acked or scd_ssn didn't progress which is
	 * possible (i.e. first MPDU in the aggregation wasn't acked)
	 * Still it's important to update RS about sent vs. acked.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info->band = chanctx_conf->def.chan->band;
		iwl_mvm_hwrate_to_tx_status(rate, ba_info);

		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
		iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int sta_id, tid, txq, index;
	struct ieee80211_tx_info ba_info = {};
	struct iwl_mvm_ba_notif *ba_notif;
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;

		sta_id = ba_res->sta_id;
		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
		ba_info.status.tx_time =
			(u16)le32_to_cpu(ba_res->wireless_time);
		ba_info.status.status_driver_data[0] =
			(void *)(uintptr_t)ba_res->reduced_txp;

		if (!le16_to_cpu(ba_res->tfd_cnt))
			goto out;

		/*
		 * TODO:
		 * When supporting multi TID aggregations - we need to move
		 * next_reclaimed to be per TXQ and not per TID or handle it
		 * in a different way.
		 * This will go together with SN and AddBA offload and cannot
		 * be handled properly for now.
		 */
		WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
		tid = ba_res->ra_tid[0].tid;
		if (tid == IWL_MGMT_TID)
			tid = IWL_MAX_TID_COUNT;
		iwl_mvm_tx_reclaim(mvm, sta_id, tid,
				   (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
				   le16_to_cpu(ba_res->tfd[0].tfd_index),
				   &ba_info, le32_to_cpu(ba_res->tx_rate));

out:
		IWL_DEBUG_TX_REPLY(mvm,
				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
				   sta_id, le32_to_cpu(ba_res->flags),
				   le16_to_cpu(ba_res->txed),
				   le16_to_cpu(ba_res->done));
		return;
	}

	ba_notif = (void *)pkt->data;
	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;
	/* "flow" corresponds to Tx queue */
	txq = le16_to_cpu(ba_notif->scd_flow);
	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	index = le16_to_cpu(ba_notif->scd_ssn);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
	if (WARN_ON_ONCE(!mvmsta)) {
		rcu_read_unlock();
		return;
	}

	tid_data = &mvmsta->tid_data[tid];

	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
	ba_info.status.ampdu_len = ba_notif->txed;
	ba_info.status.tx_time = tid_data->tx_time;
	ba_info.status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;

	rcu_read_unlock();

	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
			   tid_data->rate_n_flags);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);

	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   le64_to_cpu(ba_notif->bitmap), txq, index,
			   ba_notif->txed, ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
}
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}
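/*
 * Hedged usage sketch of the drain sequence described above (helper names
 * are illustrative, not taken from this file):
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);		(1) mark draining
 *	iwl_mvm_flush_tx_path(mvm, tfd_queue_msk, 0);	(2) flush the Tx path
 *	... wait for the transport queues to empty ...	(3) then remove the STA
 */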