/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
#include "fw-dbg.h"
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR sent to %pM, tid %d, ssn %d",
				    addr, tid, ssn);
}
#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))
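
/*
 * For example, OPT_HDR(struct frag_hdr, skb, off) below yields a pointer
 * to an IPv6 fragment header located 'off' bytes past the network header.
 */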
static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_tx_info *info,
			    struct iwl_tx_cmd *tx_cmd)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it anyway.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST &&
			    protocol != NEXTHDR_FRAGMENT) {
				skb_checksum_help(skb);
				goto out;
			}

			if (protocol == NEXTHDR_FRAGMENT) {
				struct frag_hdr *hp =
					OPT_HDR(struct frag_hdr, skb, off);

				protocol = hp->nexthdr;
				off += sizeof(struct frag_hdr);
			} else {
				struct ipv6_opt_hdr *hp =
					OPT_HDR(struct ipv6_opt_hdr, skb, off);

				protocol = hp->nexthdr;
				off += ipv6_optlen(hp);
			}
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
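	/*
	 * The value 4 above is presumably the 8-byte LLC/SNAP header that
	 * precedes the IP header, expressed in the same 16-bit-word units
	 * used for TX_CMD_OFFLD_MH_SIZE below (2 * 4 = 8 bytes).
	 */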
	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/* mac header len should include IV, size is in words */
	if (info->control.hw_key)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
#endif
}
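
/*
 * For example, an unencrypted QoS data frame (26-byte MAC header, i.e.
 * 13 words) carrying TCP over IPv4 inside an A-MSDU leaves the function
 * above with L4_EN and L3_EN set, 4 in the IP_HDR field and 13 in
 * MH_SIZE.
 */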
/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			tx_cmd->offload_assist |=
				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len +
		(uintptr_t)info->driver_data[0]);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
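	/*
	 * For example, a 26-byte QoS header ends 2 bytes short of a dword
	 * boundary (26 % 4 == 2), so the PAD bit asks the transport to
	 * insert 2 bytes of padding after the MAC header.
	 */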

	iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
}
/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			    struct ieee80211_tx_info *info,
			    struct ieee80211_sta *sta, __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rate_plcp;

	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */

	if (ieee80211_is_data(fc) && sta) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
		return;
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx,
		  le16_to_cpu(fc));

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHZ band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
	else
		rate_flags =
			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}
/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		pn = atomic64_inc_return(&keyconf->tx_pn);
		crypto_hdr[0] = pn;
		crypto_hdr[2] = 0;
		crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
		crypto_hdr[1] = pn >> 8;
		crypto_hdr[4] = pn >> 16;
		crypto_hdr[5] = pn >> 24;
		crypto_hdr[6] = pn >> 32;
		crypto_hdr[7] = pn >> 40;
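		/*
		 * The resulting 8-byte CCMP header is PN0, PN1, reserved,
		 * key-ID/Ext-IV (0x20 sets Ext-IV), PN2, PN3, PN4, PN5:
		 * the 48-bit PN in little-endian byte order, split around
		 * the key byte.
		 */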
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			  TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}
/*
 * Allocates and sets the Tx cmd and the driver data pointers in the skb
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = TX_CMD;
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	memset(&info->status, 0, sizeof(info->status));
	memset(info->driver_data, 0, sizeof(info->driver_data));

	info->driver_data[1] = dev_cmd;

	return dev_cmd;
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info->control.vif ||
			  info->hw_queue != info->control.vif->cab_queue)))
		return -1;

	/* This holds the amsdu headers length */
	info->driver_data[0] = (void *)(uintptr_t)0;

	/*
	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
	 * and hence needs to be sent on the aux queue
	 */
	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    info->control.vif->type == NL80211_IFTYPE_STATION)
		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;

	/*
	 * If the interface on which the frame is sent is the P2P_DEVICE
	 * or an AP/GO interface use the broadcast station associated
	 * with it; otherwise if the interface is a managed interface
	 * use the AP station associated with it for multicast traffic
	 * (this is not possible for unicast packets as a TDLS discovery
	 * response is sent without a station entry); otherwise use the
	 * AUX station.
	 */
	sta_id = mvm->aux_sta.sta_id;
	if (info->control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info->control.vif);

		if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info->control.vif->type == NL80211_IFTYPE_AP)
			sta_id = mvmvif->bcast_sta.sta_id;
		else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
			 is_multicast_ether_addr(hdr->addr1)) {
			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

			if (ap_sta_id != IWL_MVM_STATION_COUNT)
				sta_id = ap_sta_id;
		}
	}

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	/*
	 * Increase the pending frames counter, so that later when a reply comes
	 * in and the counter is decreased - we don't start getting negative
	 * values.
	 * Note that we don't need to make sure it isn't agg'd, since we're
	 * TXing non-sta
	 */
	atomic_inc(&mvm->pending_frames[sta_id]);

	return 0;
}
#ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	struct sk_buff *tmp, *next;
	char cb[sizeof(skb->cb)];
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
	unsigned int dbg_max_amsdu_len;
	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 *qc, tid, txf;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (!sta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    !mvmsta->tlc_amsdu) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * Ask the stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		num_subframes = 1;
		pad = 0;
		netdev_features &= ~NETIF_F_CSUM_MASK;
		goto segment;
	}

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during a BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	max_amsdu_len = sta->max_amsdu_len;
	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);

	/* the Tx FIFO to which this A-MSDU will be routed */
	txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
			      mvm->shared_mem_cfg.txfifo_size[txf] - 256);

	if (dbg_max_amsdu_len)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
				      dbg_max_amsdu_len);

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
	if (num_subframes > 1)
		*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
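	/*
	 * Worked example (illustrative numbers): with mss = 1460 and
	 * snap_ip_tcp = 48 (8 SNAP + 20 IPv4 + 20 TCP), subf_len is
	 * 14 + 48 + 1460 = 1522 and pad = 2, so a max_amsdu_len of 7935
	 * yields (7935 + 2) / (1522 + 2) = 5 subframes.
	 */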
	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	num_subframes =
		min_t(unsigned int, num_subframes,
		      (mvm->trans->max_skb_frags - 1 -
		       skb_shinfo(skb)->nr_frags) / 2);

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes. Note that the original skb
		 * already had one set of SNAP / IP / TCP headers.
		 */
		num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
		info = IEEE80211_SKB_CB(skb);
		amsdu_add = num_subframes * sizeof(struct ethhdr) +
			(num_subframes - 1) * (snap_ip_tcp + pad);
		/* This holds the amsdu headers length */
		info->driver_data[0] = (void *)(uintptr_t)amsdu_add;

		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
segment:
	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));
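	/*
	 * With the inflated gso_size, each segment the stack produces below
	 * carries num_subframes * mss bytes of TCP payload, i.e. exactly one
	 * A-MSDU's worth of data.
	 */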
	next = skb_gso_segment(skb, netdev_features);
	skb_shinfo(skb)->gso_size = mss;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	while (next) {
		tmp = next;
		next = tmp->next;

		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
			info = IEEE80211_SKB_CB(tmp);
			amsdu_add = num_subframes * sizeof(struct ethhdr) +
				(num_subframes - 1) * (snap_ip_tcp + pad);
			info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			qc = ieee80211_get_qos_ctl((void *)tmp->data);

			if (ipv4)
				ip_send_check(ip_hdr(tmp));
			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			skb_shinfo(tmp)->gso_size = 0;
		}

		tmp->prev = NULL;
		tmp->next = NULL;

		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif
static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta, u8 tid,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 mac_queue = info->hw_queue;
	struct sk_buff_head *deferred_tx_frames;

	lockdep_assert_held(&mvm_sta->lock);

	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

	skb_queue_tail(deferred_tx_frames, skb);

	/*
	 * The first deferred frame should've stopped the MAC queues, so we
	 * should never get a second deferred frame for the RA/TID.
	 */
	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
		  skb_queue_len(deferred_tx_frames))) {
		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
		schedule_work(&mvm->add_stream_wk);
	}
}
/*
 * Prepares the Tx cmd for a single MPDU and hands it to the transport
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u8 txq_id = info->hw_queue;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
		return -1;

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	/* From now on, we cannot access info->control */

	/*
	 * We handle the EOSP bit entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   (ieee80211_is_qos_nullfunc(fc) ||
		    ieee80211_is_nullfunc(fc))) {
		/*
		 * nullfunc frames should go to the MGMT queue regardless of QOS
		 */
		tid = IWL_MAX_TID_COUNT;
		txq_id = mvmsta->tid_data[tid].txq_id;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	if (sta->tdls) {
		/* default to TID 0 for non-QoS packets */
		u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;

		txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
	}

	if (is_ampdu) {
		if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;
		txq_id = mvmsta->tid_data[tid].txq_id;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		if (unlikely(mvmsta->tid_data[tid].txq_id ==
			     IEEE80211_INVAL_HW_QUEUE)) {
			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

			/*
			 * The frame is now deferred, and the worker scheduled
			 * will re-allocate it, so we can free it for now.
			 */
			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
			spin_unlock(&mvmsta->lock);
			return 0;
		}

		txq_id = mvmsta->tid_data[tid].txq_id;
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
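	/*
	 * seq_ctrl keeps the fragment number in bits 0-3 and the sequence
	 * number in bits 4-15, so adding 0x10 above advances the sequence
	 * number by exactly one.
	 */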
	spin_unlock(&mvmsta->lock);

	if (txq_id < mvm->first_agg_queue)
		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}
*mvm
, struct sk_buff
*skb
,
918 struct ieee80211_sta
*sta
)
920 struct iwl_mvm_sta
*mvmsta
= iwl_mvm_sta_from_mac80211(sta
);
921 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
922 struct sk_buff_head mpdus_skbs
;
923 unsigned int payload_len
;
926 if (WARN_ON_ONCE(!mvmsta
))
929 if (WARN_ON_ONCE(mvmsta
->sta_id
== IWL_MVM_STATION_COUNT
))
932 /* This holds the amsdu headers length */
933 info
->driver_data
[0] = (void *)(uintptr_t)0;
935 if (!skb_is_gso(skb
))
936 return iwl_mvm_tx_mpdu(mvm
, skb
, sta
);
938 payload_len
= skb_tail_pointer(skb
) - skb_transport_header(skb
) -
939 tcp_hdrlen(skb
) + skb
->data_len
;
941 if (payload_len
<= skb_shinfo(skb
)->gso_size
)
942 return iwl_mvm_tx_mpdu(mvm
, skb
, sta
);
944 __skb_queue_head_init(&mpdus_skbs
);
946 ret
= iwl_mvm_tx_tso(mvm
, skb
, sta
, &mpdus_skbs
);
950 if (WARN_ON(skb_queue_empty(&mpdus_skbs
)))
953 while (!skb_queue_empty(&mpdus_skbs
)) {
954 skb
= __skb_dequeue(&mpdus_skbs
);
956 ret
= iwl_mvm_tx_mpdu(mvm
, skb
, sta
);
958 __skb_queue_purge(&mpdus_skbs
);
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
	    iwl_mvm_tid_queued(tid_data) == 0) {
		/*
		 * Now that this aggregation queue is empty tell mac80211 so it
		 * knows we no longer have frames buffered for the station on
		 * this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	if (tid_data->ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
				    CMD_ASYNC);
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}
/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
					    u32 status)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tx_status *status_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
	status_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
		if (!status_trig->statuses[i].status)
			break;

		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
			continue;

		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "Tx status %d was received",
					    status & TX_STATUS_MSK);
		break;
	}
}
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct ieee80211_sta *sta;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	u16 next_reclaimed, seq_ctl;
	bool is_ndp = false;

	__skb_queue_head_init(&skbs);

	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		skb_freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		case TX_STATUS_FAIL_DEST_PS:
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			break;
		default:
			break;
		}

		iwl_mvm_tx_status_check_trigger(mvm, status);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
					    info);
		info->status.status_driver_data[1] =
			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);

		/* Single frame failure in an AMPDU queue => send BAR */
		if (txq_id >= mvm->first_agg_queue &&
		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		/* W/A FW bug: seq_ctl is wrong when the status isn't success */
		if (status != TX_STATUS_SUCCESS) {
			struct ieee80211_hdr *hdr = (void *)skb->data;
			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
		}

		if (unlikely(!seq_ctl)) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			/*
			 * If it is an NDP, we can't update next_reclaim since
			 * its sequence control is 0. Note that for that same
			 * reason, NDPs are never sent to A-MPDU'able queues
			 * so that we can never have more than one freed frame
			 * for a single Tx response (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		info->status.status_driver_data[0] =
			(void *)(uintptr_t)tx_resp->reduced_tpc;

		ieee80211_tx_status(mvm->hw, skb);
	}
	if (txq_id >= mvm->first_agg_queue) {
		/* If this is an aggregation queue, we use the ssn since:
		 * ssn = wifi seq_num % 256.
		 * The seq_ctl is the sequence control of the packet to which
		 * this Tx response relates. But if there is a hole in the
		 * bitmap of the BA we received, this Tx response may allow to
		 * reclaim the hole and all the subsequent packets that were
		 * already acked. In that case, seq_ctl != ssn, and the next
		 * packet to be reclaimed will be ssn and not seq_ctl. In that
		 * case, several packets will be reclaimed even if
		 * frame_count = 1.
		 *
		 * The ssn is the index (% 256) of the latest packet that has
		 * been treated (acked / dropped) + 1.
		 */
		next_reclaimed = ssn;
	} else {
		/* The next packet to be reclaimed is the one after this one */
		next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
	}
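	/*
	 * For instance, if SNs 10-13 were in flight and a BA already acked
	 * 11-13, the Tx response that acks 10 moves the scheduler's ssn to
	 * 14, so that single response (frame_count == 1) reclaims all four
	 * frames.
	 */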
	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (tid != IWL_TID_NON_QOS) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);
			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	} else {
		mvmsta = NULL;
	}

	/*
	 * If the txq is not an AMPDU queue, there is no chance we freed
	 * several skbs. Check that out...
	 */
	if (txq_id >= mvm->first_agg_queue)
		goto out;

	/* We can't free more than one frame at once on a shared queue */
	WARN_ON(skb_freed > 1);

	/* If we have still frames for this STA nothing to do here */
	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
		goto out;

	if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {

		/*
		 * If there are no pending frames for this STA and
		 * the tx to this station is not disabled, notify
		 * mac80211 that this station can now wake up in its
		 * STA table.
		 * If mvmsta is not NULL, sta is valid.
		 */

		spin_lock_bh(&mvmsta->lock);

		if (!mvmsta->disable_tx)
			ieee80211_sta_block_awake(mvm->hw, sta, false);

		spin_unlock_bh(&mvmsta->lock);
	}

	if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
		/*
		 * We are draining and this was the last packet - pre_rcu_remove
		 * has been called already. We might be after the
		 * synchronize_net already.
		 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
		 */
		set_bit(sta_id, mvm->sta_drained);
		schedule_work(&mvm->sta_drained_wk);
	}

out:
	rcu_read_unlock();
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(LAST_SENT_TTL);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}

static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status = &tx_resp->status;
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct ieee80211_sta *sta;

	if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
		return;

	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
	}

	rcu_read_unlock();
}
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
					  struct iwl_mvm_ba_notif *ba_notif,
					  struct iwl_mvm_tid_data *tid_data)
{
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = ba_notif->txed_2_done;
	info->status.ampdu_len = ba_notif->txed;
	iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
				    info);
	/* TODO: not accounted if the whole A-MPDU failed */
	info->status.tx_time = tid_data->tx_time;
	info->status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;
	info->status.status_driver_data[1] =
		(void *)(uintptr_t)tid_data->rate_n_flags;
}
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff *skb;
	int sta_id, tid, freed;
	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);

	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
		      tid >= IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted ? */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != scd_flow) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d, flow %d\n",
			tid_data->txq_id, tid, scd_flow);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&mvmsta->lock);

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   (u8 *)&ba_notif->sta_addr_lo32,
			   ba_notif->sta_id);
	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_notif->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_notif->txed,
			   ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
	tid_data->next_reclaimed = ba_resp_scd_ssn;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
		if (freed == 1)
			iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
	}

	spin_unlock_bh(&mvmsta->lock);

	/* We got a BA notif with 0 acked or scd_ssn didn't progress which is
	 * possible (i.e. first MPDU in the aggregation wasn't acked)
	 * Still it's important to update RS about sent vs. acked.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_tx_info ba_info = {};
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info.band = chanctx_conf->def.chan->band;
		iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);

		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
		iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 * (an illustrative call sequence follows the function below)
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}