// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"

static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR sent to %pM, tid %d, ssn %d",
				addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info,
			   u16 offload_assist)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/* Do not compute checksum if already computed */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here, protocol should now be TCP or UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
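	/*
	 * 4 words == 8 bytes, i.e. exactly the length of the LLC/SNAP
	 * header (RFC 1042 encapsulation) that sits between the 802.11
	 * MAC header and the IP header.
	 */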

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/*
	 * The MAC header length should include the IV, unless the IV is
	 * added by the firmware as in WEP; size is in words.
	 * In the new Tx API, the IV is always added by the firmware.
	 */
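	/*
	 * Example: a QoS data frame has a 26 byte MAC header, i.e.
	 * mh_len = 13 words; with an 8 byte CCMP IV counted in,
	 * mh_len = 34 / 2 = 17 words.
	 */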
	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
	return offload_assist;
}

/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u16 offload_assist = 0;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
	    (ieee80211_is_probe_resp(fc) &&
	     !is_multicast_ether_addr(hdr->addr1)))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		if (ieee80211_is_data(fc))
			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		else
			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;

		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, but we don't
		 * support it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(hdr->addr1))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
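	/*
	 * Example: a 26 byte QoS data header is not a multiple of 4, so
	 * the transport pads it to 28 bytes and the flag advertises the
	 * extra 2 bytes.
	 */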

	tx_cmd->offload_assist |=
		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
					    offload_assist));
}

static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;

	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
	}

	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}

static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
{
	int rate_idx;
	u8 rate_plcp;
	u32 rate_flags = 0;

	/* An HT rate doesn't make sense for a non-data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/*
	 * For bands other than 2.4 GHz, remap mac80211 rate
	 * indices into driver indices
	 */
	if (info->band != NL80211_BAND_2GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
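	/*
	 * Example: on 5 GHz mac80211's rate index 0 is 6 Mbps, while the
	 * driver's rate table starts with the CCK rates, so the index is
	 * shifted up by IWL_FIRST_OFDM_RATE.
	 */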

	/* For the 2.4 GHz band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	return (u32)rate_plcp | rate_flags;
}

static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_sta *sta, __le16 fc)
{
	return iwl_mvm_get_tx_rate(mvm, info, sta) |
	       iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */

	if (ieee80211_is_data(fc) && sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
			tx_cmd->initial_rate_index = 0;
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
			return;
		}
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags =
		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
}

static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
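	/*
	 * Lay out the 8 byte CCMP/GCMP header: PN0, PN1, reserved,
	 * key ID | Ext IV bit (0x20), then PN2..PN5.
	 */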
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}

/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			 TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		type = TX_CMD_SEC_GCMP;
		fallthrough;
	case WLAN_CIPHER_SUITE_CCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * one.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}

/*
 * Allocates the Tx cmd and sets the driver data pointers in the skb
 */
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_tx_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	dev_cmd->hdr.cmd = TX_CMD;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		u16 offload_assist = 0;
		u32 rate_n_flags = 0;
		u16 flags = 0;
		struct iwl_mvm_sta *mvmsta = sta ?
			iwl_mvm_sta_from_mac80211(sta) : NULL;

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
		}

		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
						 offload_assist);

		/* padding is inserted later in transport */
		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
			offload_assist |= BIT(TX_CMD_OFFLD_PAD);

		if (!info->control.hw_key)
			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

		/*
		 * For data packets rate info comes from the fw. Only
		 * set rate/antenna during connection establishment or in case
		 * no station is given.
		 */
		if (!sta || !ieee80211_is_data(hdr->frame_control) ||
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
			flags |= IWL_TX_FLAGS_CMD_RATE;
			rate_n_flags =
				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
							    hdr->frame_control);
		}

		if (mvm->trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_AX210) {
			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le32(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le16(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		} else {
			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le16(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le32(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		}
		goto out;
	}

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
	return dev_cmd;
}

static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_tx_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

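	/*
	 * Stash the device command in the skb so the Tx response path
	 * (iwl_mvm_rx_tx_cmd_single()) can free it once the frame is
	 * reclaimed.
	 */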
	skb_info->driver_data[1] = cmd;
}

static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	__le16 fc = hdr->frame_control;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Non-bufferable frames use the broadcast station, thus they
		 * use the probe queue.
		 * Also take care of the case where we send a deauth to a
		 * station that we don't have, or similarly an association
		 * response (with non-success status) for a station we can't
		 * accept.
		 * Also, disassociate frames might happen, in particular with
		 * reason 7 ("Class 3 frame received from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(fc) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return mvm->probe_queue;

		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
		    is_multicast_ether_addr(hdr->addr1))
			return mvmvif->cab_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "fc=0x%02x", le16_to_cpu(fc));
		return mvm->probe_queue;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;

		WARN_ON_ONCE(1);
		return mvm->p2p_dev_queue;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}

static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
	struct iwl_probe_resp_data *resp_data;
	u8 *ie, *pos;
	u8 match[] = {
		(WLAN_OUI_WFA >> 16) & 0xff,
		(WLAN_OUI_WFA >> 8) & 0xff,
		WLAN_OUI_WFA & 0xff,
		WLAN_OUI_TYPE_WFA_P2P,
	};

	rcu_read_lock();

	resp_data = rcu_dereference(mvmvif->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
					  mgmt->u.probe_resp.variable,
					  skb->len - base_len,
					  match, 4, 2);
	if (!ie) {
		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
		goto out;
	}

	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mvm,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}

int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info info;
	struct iwl_device_tx_cmd *dev_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	__le16 fc = hdr->frame_control;
	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
		IEEE80211_TX_CTL_TX_OFFCHAN;
	int queue = -1;

	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP ||
		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
			if (!ieee80211_is_data(hdr->frame_control))
				sta_id = mvmvif->bcast_sta.sta_id;
			else
				sta_id = mvmvif->mcast_sta.sta_id;

			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
			queue = mvm->snif_queue;
			sta_id = mvm->snif_sta.sta_id;
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   offchannel) {
			/*
			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
			 * that can be used in 2 different types of vifs, P2P &
			 * STATION.
			 * P2P uses the offchannel queue.
			 * STATION (HS2.0) uses the auxiliary context of the FW,
			 * and hence needs to be sent on the aux queue.
			 */
			sta_id = mvm->aux_sta.sta_id;
			queue = mvm->aux_queue;
		}
	}

	if (queue < 0) {
		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
		return -1;
	}

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	return 0;
}

unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta, unsigned int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
	u8 ac = tid_to_mac80211_ac[tid];
	unsigned int txf;
	int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);

	/* For HE, redirect to the trigger-based FIFOs */
	if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
		ac += 4;

	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
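	/*
	 * Example (illustrative numbers): with an 8 KiB TXF and a station
	 * limit of 11454 bytes, this evaluates to 8192 - 256 = 7936.
	 */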
	return min_t(unsigned int, mvmsta->max_amsdu_len,
		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}

#ifdef CONFIG_INET

static int
iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
		       netdev_features_t netdev_flags,
		       struct sk_buff_head *mpdus_skb)
{
	struct sk_buff *tmp, *next;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	char cb[sizeof(skb->cb)];
	u16 i = 0;
	unsigned int tcp_payload_len;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	bool qos = ieee80211_is_data_qos(hdr->frame_control);
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;

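	/*
	 * Temporarily inflate gso_size so that skb_gso_segment() cuts the
	 * stream into chunks of num_subframes * mss bytes: each resulting
	 * skb then carries the payload of exactly one A-MSDU.
	 */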
	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_flags);
	skb_shinfo(skb)->gso_size = mss;
	skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	skb_list_walk_safe(next, tmp, next) {
		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			skb_shinfo(tmp)->gso_size = mss;
			skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
							   SKB_GSO_TCPV6;
		} else {
			if (qos) {
				u8 *qc;

				if (ipv4)
					ip_send_check(ip_hdr(tmp));

				qc = ieee80211_get_qos_ctl((void *)tmp->data);
				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			}
			skb_shinfo(tmp)->gso_size = 0;
		}

		skb_mark_not_on_list(tmp);
		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}

static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	u16 snap_ip_tcp, pad;
	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 tid;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	if (!mvmsta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    !mvmsta->amsdu_enabled)
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	/*
	 * Do not build an A-MSDU for IPv6 with extension headers.
	 * Ask the stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		netdev_flags &= ~NETIF_F_CSUM_MASK;
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
	}

	tid = ieee80211_get_tid(hdr);
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during a BA session.
	 */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
	     !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
	    !(mvmsta->amsdu_enabled & BIT(tid)))
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	/*
	 * Take the min of the ieee80211 station and mvm station limits
	 */
	max_amsdu_len =
		min_t(unsigned int, sta->max_amsdu_len,
		      iwl_mvm_max_amsdu_size(mvm, sta, tid));

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
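	/*
	 * Solving N * subf_len + (N - 1) * pad <= max_amsdu_len for N
	 * gives N <= (max_amsdu_len + pad) / (subf_len + pad), which is
	 * the integer division below.
	 */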
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);

	if (sta->max_amsdu_subframes &&
	    num_subframes > sta->max_amsdu_subframes)
		num_subframes = sta->max_amsdu_subframes;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 * 2 for each subframe
	 * 1 more for each fragment
	 * 1 more for the potential data in the header
	 */
	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
	    mvm->trans->max_skb_frags)
		num_subframes = 1;

	if (num_subframes > 1)
		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
				      mpdus_skb);
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}

static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
			       struct iwl_mvm_sta *mvmsta,
			       int airtime)
{
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata;

	if (mac >= NUM_MAC_INDEX_DRIVER)
		return;

	mdata = &mvm->tcm.data[mac];

	if (mvm->tcm.paused)
		return;

	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
		schedule_delayed_work(&mvm->tcm.work, 0);

	mdata->tx.airtime += airtime;
}

static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
				 struct iwl_mvm_sta *mvmsta, int tid)
{
	u32 ac = tid_to_mac80211_ac[tid];
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata;

	if (mac >= NUM_MAC_INDEX_DRIVER)
		return -EINVAL;

	mdata = &mvm->tcm.data[mac];

	mdata->tx.pkts[ac]++;

	return 0;
}

/*
 * Prepares and sends a single MPDU.
 *
 * This function must be called with BHs disabled.
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_tx_cmd *dev_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u16 txq_id;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
		return -1;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	/* nullfunc frames should go to the MGMT queue regardless of QoS;
	 * the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
	 * assignment of the MGMT TID
	 */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		tid = ieee80211_get_tid(hdr);
		if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
		if (WARN_ONCE(is_ampdu &&
			      mvmsta->tid_data[tid].state != IWL_AGG_ON,
			      "Invalid internal agg state %d for TID %d",
			      mvmsta->tid_data[tid].state, tid))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(seq_number);
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
		tid = IWL_TID_NON_QOS;
	}

	txq_id = mvmsta->tid_data[tid].txq_id;

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		spin_unlock(&mvmsta->lock);
		return -1;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that no lock is taken here in order to not serialize
		 * the TX flow. This isn't dangerous because scheduling
		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
		 * schedule it due to some race condition then next TX we get
		 * here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
		     mvmsta->sta_id, tid, txq_id,
		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

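	/*
	 * The sequence number occupies bits 4..15 of seq_ctrl
	 * (IEEE80211_SCTL_SEQ), so the next in-order value is the
	 * current one plus 0x10.
	 */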
	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
				  tid == IWL_MAX_TID_COUNT ? 0 : tid))
		goto drop;

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid);
	return -1;
}

int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
		       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
		return ret;

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;
}

static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;
	u16 normalized_ssn;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty, tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;
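	/*
	 * Example: after 256 frames the 8 bit index has wrapped to 0
	 * while the 12 bit ssn reads 0x100; the masking above aligns
	 * the two before the comparison.
	 */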

	if (normalized_ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
			 RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}

static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
					    u32 status)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tx_status *status_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_TX_STATUS);
	if (!trig)
		return;

	status_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
		if (!status_trig->statuses[i].status)
			break;

		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Tx status %d was received",
					status & TX_STATUS_MSK);
		break;
	}
}

/*
 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @tx_resp: the Tx response from the fw (agg or non-agg)
 *
 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
 * it can't know that everything will go well until the end of the AMPDU, it
 * can't know in advance the number of MPDUs that will be sent in the current
 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
 * Hence, it can't know in advance what the SSN of the SCD will be at the end
 * of the batch. This is why the SSN of the SCD is written at the end of the
 * whole struct at a variable offset. This function knows how to cope with the
 * variable offset and returns the SSN of the SCD.
 */
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
				      struct iwl_mvm_tx_resp *tx_resp)
{
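	/*
	 * The agg status array holds frame_count entries; the SCD SSN is
	 * the 32 bit word immediately after it, hence the pointer
	 * arithmetic, masked down to the 12 bit sequence number space.
	 */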
1371 | return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + | |
1372 | tx_resp->frame_count) & 0xfff; | |
1373 | } | |
1374 | ||
1375 | static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |
1376 | struct iwl_rx_packet *pkt) | |
1377 | { | |
1378 | struct ieee80211_sta *sta; | |
1379 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | |
1380 | int txq_id = SEQ_TO_QUEUE(sequence); | |
1381 | /* struct iwl_mvm_tx_resp_v3 is almost the same */ | |
1382 | struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; | |
1383 | int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); | |
1384 | int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); | |
1385 | struct agg_tx_status *agg_status = | |
1386 | iwl_mvm_get_agg_status(mvm, tx_resp); | |
1387 | u32 status = le16_to_cpu(agg_status->status); | |
1388 | u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); | |
1389 | struct sk_buff_head skbs; | |
1390 | u8 skb_freed = 0; | |
1391 | u8 lq_color; | |
1392 | u16 next_reclaimed, seq_ctl; | |
1393 | bool is_ndp = false; | |
1394 | ||
1395 | __skb_queue_head_init(&skbs); | |
1396 | ||
1397 | if (iwl_mvm_has_new_tx_api(mvm)) | |
1398 | txq_id = le16_to_cpu(tx_resp->tx_queue); | |
1399 | ||
1400 | seq_ctl = le16_to_cpu(tx_resp->seq_ctl); | |
1401 | ||
1402 | /* we can free until ssn % q.n_bd not inclusive */ | |
1403 | iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); | |
1404 | ||
1405 | while (!skb_queue_empty(&skbs)) { | |
1406 | struct sk_buff *skb = __skb_dequeue(&skbs); | |
1407 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
1408 | struct ieee80211_hdr *hdr = (void *)skb->data; | |
1409 | bool flushed = false; | |
1410 | ||
1411 | skb_freed++; | |
1412 | ||
1413 | iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); | |
1414 | ||
1415 | memset(&info->status, 0, sizeof(info->status)); | |
1416 | ||
1417 | /* inform mac80211 about what happened with the frame */ | |
1418 | switch (status & TX_STATUS_MSK) { | |
1419 | case TX_STATUS_SUCCESS: | |
1420 | case TX_STATUS_DIRECT_DONE: | |
1421 | info->flags |= IEEE80211_TX_STAT_ACK; | |
1422 | break; | |
1423 | case TX_STATUS_FAIL_FIFO_FLUSHED: | |
1424 | case TX_STATUS_FAIL_DRAIN_FLOW: | |
1425 | flushed = true; | |
1426 | break; | |
1427 | case TX_STATUS_FAIL_DEST_PS: | |
1428 | /* the FW should have stopped the queue and not | |
1429 | * return this status | |
1430 | */ | |
1431 | WARN_ON(1); | |
1432 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | |
1433 | break; | |
1434 | default: | |
1435 | break; | |
1436 | } | |
1437 | ||
1438 | if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && | |
1439 | ieee80211_is_mgmt(hdr->frame_control)) | |
1440 | iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); | |
1441 | ||
1442 | /* | |
1443 | * If we are freeing multiple frames, mark all the frames | |
1444 | * but the first one as acked, since they were acknowledged | |
1445 | * before | |
1446 | * */ | |
1447 | if (skb_freed > 1) | |
1448 | info->flags |= IEEE80211_TX_STAT_ACK; | |
1449 | ||
1450 | iwl_mvm_tx_status_check_trigger(mvm, status); | |
1451 | ||
1452 | info->status.rates[0].count = tx_resp->failure_frame + 1; | |
1453 | iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), | |
1454 | info); | |
1455 | info->status.status_driver_data[1] = | |
1456 | (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); | |
1457 | ||
1458 | /* Single frame failure in an AMPDU queue => send BAR */ | |
1459 | if (info->flags & IEEE80211_TX_CTL_AMPDU && | |
1460 | !(info->flags & IEEE80211_TX_STAT_ACK) && | |
1461 | !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) | |
1462 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | |
1463 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | |
1464 | ||
1465 | /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ | |
1466 | if (ieee80211_is_back_req(hdr->frame_control)) | |
1467 | seq_ctl = 0; | |
1468 | else if (status != TX_STATUS_SUCCESS) | |
1469 | seq_ctl = le16_to_cpu(hdr->seq_ctrl); | |
1470 | ||
1471 | if (unlikely(!seq_ctl)) { | |
1472 | struct ieee80211_hdr *hdr = (void *)skb->data; | |
1473 | ||
1474 | /* | |
1475 | * If it is an NDP, we can't update next_reclaim since | |
1476 | * its sequence control is 0. Note that for that same | |
1477 | * reason, NDPs are never sent to A-MPDU'able queues | |
1478 | * so that we can never have more than one freed frame | |
1479 | * for a single Tx resonse (see WARN_ON below). | |
1480 | */ | |
1481 | if (ieee80211_is_qos_nullfunc(hdr->frame_control)) | |
1482 | is_ndp = true; | |
1483 | } | |
1484 | ||
1485 | /* | |
1486 | * TODO: this is not accurate if we are freeing more than one | |
1487 | * packet. | |
1488 | */ | |
1489 | info->status.tx_time = | |
1490 | le16_to_cpu(tx_resp->wireless_media_time); | |
1491 | BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); | |
1492 | lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); | |
1493 | info->status.status_driver_data[0] = | |
1494 | RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); | |
1495 | ||
1496 | ieee80211_tx_status(mvm->hw, skb); | |
1497 | } | |

	/* This is an aggregation queue or might become one, so we use
	 * the ssn since: ssn = wifi seq_num % 256.
	 * The seq_ctl is the sequence control of the packet to which
	 * this Tx response relates. But if there is a hole in the
	 * bitmap of the BA we received, this Tx response may allow us
	 * to reclaim the hole and all the subsequent packets that were
	 * already acked. In that case, seq_ctl != ssn, and the next
	 * packet to be reclaimed will be ssn and not seq_ctl. In that
	 * case, several packets will be reclaimed even if
	 * frame_count = 1.
	 *
	 * The ssn is the index (% 256) of the latest packet that has
	 * been treated (acked / dropped) + 1.
	 */
	next_reclaimed = ssn;

	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);

		if (sta->wme && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(mvm, tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent fewer
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	}
out:
	rcu_read_unlock();
}

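/*
 * Debug helper: AGG_TX_STATE_(FOO) expands into a switch case that maps
 * the AGG_TX_STATE_FOO status value to the string "FOO" for the
 * per-frame debug prints below.
 */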
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(TX_ON_AIR_DROP);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}

static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */

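/*
 * Handle the Tx response for an aggregated frame: nothing is reclaimed
 * here (that happens from the BA notification); we only cache the rate,
 * airtime and rate-table color per TID so that the later reclaim can
 * report them to mac80211 and rate scaling.
 */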
static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct iwl_mvm_sta *mvmsta;
	int queue = SEQ_TO_QUEUE(sequence);
	struct ieee80211_sta *sta;

	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(!sta || !sta->wme)) {
		rcu_read_unlock();
		return;
	}

	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		mvmsta->tid_data[tid].lq_color =
			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));
	}

	rcu_read_unlock();
}

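/*
 * Entry point for the Tx response notification: a response carrying a
 * single frame status goes through the "single" path, anything else
 * came from an aggregation queue.
 */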
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}

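/*
 * Reclaim all frames up to @index on Tx queue @txq and hand them back
 * to mac80211, attaching the BA/rate status carried in @ba_info to the
 * first reclaimed frame of the batch.
 */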
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
			       int txq, int index,
			       struct ieee80211_tx_info *ba_info, u32 rate)
{
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data = NULL;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta = NULL;
	struct sk_buff *skb;
	int freed;

	if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
		      tid > IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted? */
	if (WARN_ON_ONCE(!sta)) {
		rcu_read_unlock();
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of the
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	/*
	 * It's possible to get a BA response after invalidating the rcu (rcu
	 * is invalidated in order to prevent new Tx from being sent, but there
	 * may be some frames already in-flight).
	 * In this case we just want to reclaim, and could skip all the
	 * sta-dependent stuff since it's in the middle of being removed
	 * anyway.
	 */
	if (IS_ERR(sta))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != txq) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d\n",
			tid_data->txq_id, tid);
		/*
		 * The frames were already pulled off the transport queue
		 * above; deliver them on the way out instead of leaking them.
		 */
		goto out;
	}

	spin_lock_bh(&mvmsta->lock);

	tid_data->next_reclaimed = index;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;

	/* pack the lq color from tid_data along with the reduced txp */
	ba_info->status.status_driver_data[0] =
		RS_DRV_DATA_PACK(tid_data->lq_color,
				 ba_info->status.status_driver_data[0]);
	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
		if (freed == 1) {
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			memcpy(&info->status, &ba_info->status,
			       sizeof(ba_info->status));
			iwl_mvm_hwrate_to_tx_status(rate, info);
		}
	}

	spin_unlock_bh(&mvmsta->lock);

	/* We got a BA notif with 0 acked or scd_ssn didn't progress, which is
	 * possible (i.e. the first MPDU in the aggregation wasn't acked).
	 * Still, it's important to update RS about sent vs. acked.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info->band = chanctx_conf->def.chan->band;
		iwl_mvm_hwrate_to_tx_status(rate, ba_info);

		if (!iwl_mvm_has_tlc_offload(mvm)) {
			IWL_DEBUG_TX_REPLY(mvm,
					   "No reclaim. Update rs directly\n");
			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
		}
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}

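/*
 * BA notification handler. On devices with the new Tx API the firmware
 * sends a compressed BA notification that can cover several TIDs and
 * queues at once; on older devices each notification covers a single
 * TID and Tx queue.
 */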
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int sta_id, tid, txq, index;
	struct ieee80211_tx_info ba_info = {};
	struct iwl_mvm_ba_notif *ba_notif;
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;

	ba_info.flags = IEEE80211_TX_STAT_AMPDU;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;
		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
		int i;

		sta_id = ba_res->sta_id;
		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
		ba_info.status.tx_time =
			(u16)le32_to_cpu(ba_res->wireless_time);
		ba_info.status.status_driver_data[0] =
			(void *)(uintptr_t)ba_res->reduced_txp;

		if (!le16_to_cpu(ba_res->tfd_cnt))
			goto out;

		rcu_read_lock();

		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
		/*
		 * It's possible to get a BA response after invalidating the
		 * rcu (rcu is invalidated in order to prevent new Tx from
		 * being sent, but there may be some frames already
		 * in-flight).
		 * In this case we just want to reclaim, and could skip all
		 * the sta-dependent stuff since it's in the middle of being
		 * removed anyway.
		 */

		/* Free per TID */
		for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
				&ba_res->tfd[i];

			tid = ba_tfd->tid;
			if (tid == IWL_MGMT_TID)
				tid = IWL_MAX_TID_COUNT;

			/*
			 * tid_data is indexed by TID, not by TFD entry; the
			 * reclaim below reads lq_color back from the same
			 * TID's entry.
			 */
			if (mvmsta)
				mvmsta->tid_data[tid].lq_color = lq_color;

			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
					   (int)(le16_to_cpu(ba_tfd->q_num)),
					   le16_to_cpu(ba_tfd->tfd_index),
					   &ba_info,
					   le32_to_cpu(ba_res->tx_rate));
		}

		if (mvmsta)
			iwl_mvm_tx_airtime(mvm, mvmsta,
					   le32_to_cpu(ba_res->wireless_time));
		rcu_read_unlock();
out:
		IWL_DEBUG_TX_REPLY(mvm,
				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
				   sta_id, le32_to_cpu(ba_res->flags),
				   le16_to_cpu(ba_res->txed),
				   le16_to_cpu(ba_res->done));
		return;
	}

	ba_notif = (void *)pkt->data;
	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;
	/* "flow" corresponds to the Tx queue */
	txq = le16_to_cpu(ba_notif->scd_flow);
	/* "ssn" is the start of the block-ack Tx window, i.e. the index (in
	 * the Tx queue's circular buffer) of the first TFD/frame in the window
	 */
	index = le16_to_cpu(ba_notif->scd_ssn);

	rcu_read_lock();
	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
	if (WARN_ON_ONCE(!mvmsta)) {
		rcu_read_unlock();
		return;
	}

	tid_data = &mvmsta->tid_data[tid];

	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
	ba_info.status.ampdu_len = ba_notif->txed;
	ba_info.status.tx_time = tid_data->tx_time;
	ba_info.status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;

	rcu_read_unlock();

	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
			   tid_data->rate_n_flags);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   ba_notif->sta_addr, ba_notif->sta_id);

	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   le64_to_cpu(ba_notif->bitmap), txq, index,
			   ba_notif->txed, ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
}

/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}

int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
			   u16 tids, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.sta_id = cpu_to_le32(sta_id),
		.tid_mask = cpu_to_le16(tids),
	};

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}

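/*
 * @sta may point at either an iwl_mvm_int_sta or an iwl_mvm_sta; the
 * BUILD_BUG_ON below guarantees that sta_id lives at the same offset in
 * both structures, so reading it through either type is safe.
 */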
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
{
	struct iwl_mvm_int_sta *int_sta = sta;
	struct iwl_mvm_sta *mvm_sta = sta;

	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
		     offsetof(struct iwl_mvm_sta, sta_id));

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);

	if (internal)
		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);

	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);