drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66 #include <linux/kernel.h>
67 #include <linux/slab.h>
68 #include <linux/skbuff.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/ip.h>
72 #include <linux/if_arp.h>
73 #include <linux/time.h>
74 #include <net/mac80211.h>
75 #include <net/ieee80211_radiotap.h>
76 #include <net/tcp.h>
77
78 #include "iwl-op-mode.h"
79 #include "iwl-io.h"
80 #include "mvm.h"
81 #include "sta.h"
82 #include "time-event.h"
83 #include "iwl-eeprom-parse.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-nvm-parse.h"
89 #include "fw-dbg.h"
90
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 {
93 .max = 1,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_AP) |
99 BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 BIT(NL80211_IFTYPE_P2P_GO),
101 },
102 {
103 .max = 1,
104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 },
106 };
107
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 {
110 .num_different_channels = 2,
111 .max_interfaces = 3,
112 .limits = iwl_mvm_limits,
113 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 },
115 };
116
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 .min_len = 0,
121 .max_len = 255,
122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 .tok = &iwl_mvm_wowlan_tcp_token_feature,
127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 sizeof(struct ethhdr) -
129 sizeof(struct iphdr) -
130 sizeof(struct tcphdr),
131 .data_interval_max = 65535, /* __le16 in API */
132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 sizeof(struct ethhdr) -
134 sizeof(struct iphdr) -
135 sizeof(struct tcphdr),
136 .seq = true,
137 };
138 #endif
139
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142 * Use the reserved field to indicate magic values.
143 * these values will only be used internally by the driver,
144 * and won't make it to the fw (reserved will be 0).
145 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
146 * be the vif's ip address. in case there is not a single
147 * ip address (0, or more than 1), this attribute will
148 * be skipped.
149 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
150 * the LSB bytes of the vif's mac address
151 */
152 enum {
153 BC_FILTER_MAGIC_NONE = 0,
154 BC_FILTER_MAGIC_IP,
155 BC_FILTER_MAGIC_MAC,
156 };
157
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 {
160 /* arp */
161 .discard = 0,
162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 .attrs = {
164 {
165 /* frame type - arp, hw type - ethernet */
166 .offset_type =
167 BCAST_FILTER_OFFSET_PAYLOAD_START,
168 .offset = sizeof(rfc1042_header),
169 .val = cpu_to_be32(0x08060001),
170 .mask = cpu_to_be32(0xffffffff),
171 },
172 {
173 /* arp dest ip */
174 .offset_type =
175 BCAST_FILTER_OFFSET_PAYLOAD_START,
176 .offset = sizeof(rfc1042_header) + 2 +
177 sizeof(struct arphdr) +
178 ETH_ALEN + sizeof(__be32) +
179 ETH_ALEN,
180 .mask = cpu_to_be32(0xffffffff),
181 /* mark it as special field */
182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 },
184 },
185 },
186 {
187 /* dhcp offer bcast */
188 .discard = 0,
189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 .attrs = {
191 {
192 /* udp dest port - 68 (bootp client)*/
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = offsetof(struct udphdr, dest),
195 .val = cpu_to_be32(0x00440000),
196 .mask = cpu_to_be32(0xffff0000),
197 },
198 {
199 /* dhcp - lsb bytes of client hw address */
200 .offset_type = BCAST_FILTER_OFFSET_IP_END,
201 .offset = 38,
202 .mask = cpu_to_be32(0xffffffff),
203 /* mark it as special field */
204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 },
206 },
207 },
208 /* last filter must be empty */
209 {},
210 };
211 #endif
212
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 if (!iwl_mvm_is_d0i3_supported(mvm))
216 return;
217
218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 spin_lock_bh(&mvm->refs_lock);
220 mvm->refs[ref_type]++;
221 spin_unlock_bh(&mvm->refs_lock);
222 iwl_trans_ref(mvm->trans);
223 }
224
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 if (!iwl_mvm_is_d0i3_supported(mvm))
228 return;
229
230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 spin_lock_bh(&mvm->refs_lock);
232 if (WARN_ON(!mvm->refs[ref_type])) {
233 spin_unlock_bh(&mvm->refs_lock);
234 return;
235 }
236 mvm->refs[ref_type]--;
237 spin_unlock_bh(&mvm->refs_lock);
238 iwl_trans_unref(mvm->trans);
239 }
240
241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
242 enum iwl_mvm_ref_type except_ref)
243 {
244 int i, j;
245
246 if (!iwl_mvm_is_d0i3_supported(mvm))
247 return;
248
249 spin_lock_bh(&mvm->refs_lock);
250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
251 if (except_ref == i || !mvm->refs[i])
252 continue;
253
254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
255 i, mvm->refs[i]);
256 for (j = 0; j < mvm->refs[i]; j++)
257 iwl_trans_unref(mvm->trans);
258 mvm->refs[i] = 0;
259 }
260 spin_unlock_bh(&mvm->refs_lock);
261 }
262
263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
264 {
265 int i;
266 bool taken = false;
267
268 if (!iwl_mvm_is_d0i3_supported(mvm))
269 return true;
270
271 spin_lock_bh(&mvm->refs_lock);
272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
273 if (mvm->refs[i]) {
274 taken = true;
275 break;
276 }
277 }
278 spin_unlock_bh(&mvm->refs_lock);
279
280 return taken;
281 }
282
283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
284 {
285 iwl_mvm_ref(mvm, ref_type);
286
287 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
288 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
289 HZ)) {
290 WARN_ON_ONCE(1);
291 iwl_mvm_unref(mvm, ref_type);
292 return -EIO;
293 }
294
295 return 0;
296 }
297
298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
299 {
300 int i;
301
302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
303 for (i = 0; i < NUM_PHY_CTX; i++) {
304 mvm->phy_ctxts[i].id = i;
305 mvm->phy_ctxts[i].ref = 0;
306 }
307 }
308
309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
310 const char *alpha2,
311 enum iwl_mcc_source src_id,
312 bool *changed)
313 {
314 struct ieee80211_regdomain *regd = NULL;
315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
317 struct iwl_mcc_update_resp *resp;
318
319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
320
321 lockdep_assert_held(&mvm->mutex);
322
323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
324 if (IS_ERR_OR_NULL(resp)) {
325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
326 PTR_ERR_OR_ZERO(resp));
327 goto out;
328 }
329
330 if (changed)
331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
332
333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
334 __le32_to_cpu(resp->n_channels),
335 resp->channels,
336 __le16_to_cpu(resp->mcc));
337 	/* Store the returned source id */
338 src_id = resp->source_id;
339 kfree(resp);
340 if (IS_ERR_OR_NULL(regd)) {
341 		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
342 PTR_ERR_OR_ZERO(regd));
343 goto out;
344 }
345
346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
348 mvm->lar_regdom_set = true;
349 mvm->mcc_src = src_id;
350
351 out:
352 return regd;
353 }
354
355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
356 {
357 bool changed;
358 struct ieee80211_regdomain *regd;
359
360 if (!iwl_mvm_is_lar_supported(mvm))
361 return;
362
363 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
364 if (!IS_ERR_OR_NULL(regd)) {
365 /* only update the regulatory core if changed */
366 if (changed)
367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
368
369 kfree(regd);
370 }
371 }
372
373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
374 bool *changed)
375 {
376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
377 iwl_mvm_is_wifi_mcc_supported(mvm) ?
378 MCC_SOURCE_GET_CURRENT :
379 MCC_SOURCE_OLD_FW, changed);
380 }
381
382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
383 {
384 enum iwl_mcc_source used_src;
385 struct ieee80211_regdomain *regd;
386 int ret;
387 bool changed;
388 const struct ieee80211_regdomain *r =
389 rtnl_dereference(mvm->hw->wiphy->regd);
390
391 if (!r)
392 return -ENOENT;
393
394 /* save the last source in case we overwrite it below */
395 used_src = mvm->mcc_src;
396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
397 /* Notify the firmware we support wifi location updates */
398 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
399 if (!IS_ERR_OR_NULL(regd))
400 kfree(regd);
401 }
402
403 /* Now set our last stored MCC and source */
404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
405 &changed);
406 if (IS_ERR_OR_NULL(regd))
407 return -EIO;
408
409 /* update cfg80211 if the regdomain was changed */
410 if (changed)
411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
412 else
413 ret = 0;
414
415 kfree(regd);
416 return ret;
417 }
418
419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
420 {
421 struct ieee80211_hw *hw = mvm->hw;
422 int num_mac, ret, i;
423 static const u32 mvm_ciphers[] = {
424 WLAN_CIPHER_SUITE_WEP40,
425 WLAN_CIPHER_SUITE_WEP104,
426 WLAN_CIPHER_SUITE_TKIP,
427 WLAN_CIPHER_SUITE_CCMP,
428 };
429
430 /* Tell mac80211 our characteristics */
431 ieee80211_hw_set(hw, SIGNAL_DBM);
432 ieee80211_hw_set(hw, SPECTRUM_MGMT);
433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
434 ieee80211_hw_set(hw, QUEUE_CONTROL);
435 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
436 ieee80211_hw_set(hw, SUPPORTS_PS);
437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
438 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
440 ieee80211_hw_set(hw, CONNECTION_MONITOR);
441 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
446 if (iwl_mvm_has_new_rx_api(mvm))
447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
448 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
449 ieee80211_hw_set(hw, AP_LINK_PS);
450
451 if (mvm->trans->num_rx_queues > 1)
452 ieee80211_hw_set(hw, USES_RSS);
453
454 if (mvm->trans->max_skb_frags)
455 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
456
457 if (!iwl_mvm_is_dqa_supported(mvm))
458 hw->queues = mvm->first_agg_queue;
459 else
460 hw->queues = IEEE80211_MAX_QUEUES;
461 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
462 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
463 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
464 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
465 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
466
467 hw->radiotap_timestamp.units_pos =
468 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
469 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
470 /* this is the case for CCK frames, it's better (only 8) for OFDM */
471 hw->radiotap_timestamp.accuracy = 22;
472
473 hw->rate_control_algorithm = "iwl-mvm-rs";
474 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
475 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
476
477 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
478 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
479 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
480 hw->wiphy->cipher_suites = mvm->ciphers;
481
482 if (iwl_mvm_has_new_rx_api(mvm)) {
483 mvm->ciphers[hw->wiphy->n_cipher_suites] =
484 WLAN_CIPHER_SUITE_GCMP;
485 hw->wiphy->n_cipher_suites++;
486 mvm->ciphers[hw->wiphy->n_cipher_suites] =
487 WLAN_CIPHER_SUITE_GCMP_256;
488 hw->wiphy->n_cipher_suites++;
489 }
490
491 /* Enable 11w if software crypto is not enabled (as the
492 * firmware will interpret some mgmt packets, so enabling it
493 * with software crypto isn't safe).
494 */
495 if (!iwlwifi_mod_params.sw_crypto) {
496 ieee80211_hw_set(hw, MFP_CAPABLE);
497 mvm->ciphers[hw->wiphy->n_cipher_suites] =
498 WLAN_CIPHER_SUITE_AES_CMAC;
499 hw->wiphy->n_cipher_suites++;
500 if (iwl_mvm_has_new_rx_api(mvm)) {
501 mvm->ciphers[hw->wiphy->n_cipher_suites] =
502 WLAN_CIPHER_SUITE_BIP_GMAC_128;
503 hw->wiphy->n_cipher_suites++;
504 mvm->ciphers[hw->wiphy->n_cipher_suites] =
505 WLAN_CIPHER_SUITE_BIP_GMAC_256;
506 hw->wiphy->n_cipher_suites++;
507 }
508 }
509
510 /* currently FW API supports only one optional cipher scheme */
511 if (mvm->fw->cs[0].cipher) {
512 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
513 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
514
515 mvm->hw->n_cipher_schemes = 1;
516
517 cs->cipher = le32_to_cpu(fwcs->cipher);
518 cs->iftype = BIT(NL80211_IFTYPE_STATION);
519 cs->hdr_len = fwcs->hdr_len;
520 cs->pn_len = fwcs->pn_len;
521 cs->pn_off = fwcs->pn_off;
522 cs->key_idx_off = fwcs->key_idx_off;
523 cs->key_idx_mask = fwcs->key_idx_mask;
524 cs->key_idx_shift = fwcs->key_idx_shift;
525 cs->mic_len = fwcs->mic_len;
526
527 mvm->hw->cipher_schemes = mvm->cs;
528 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
529 hw->wiphy->n_cipher_suites++;
530 }
531
532 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
533 hw->wiphy->features |=
534 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
535 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
536 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
537
538 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
539 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
540 hw->chanctx_data_size = sizeof(u16);
541
542 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
543 BIT(NL80211_IFTYPE_P2P_CLIENT) |
544 BIT(NL80211_IFTYPE_AP) |
545 BIT(NL80211_IFTYPE_P2P_GO) |
546 BIT(NL80211_IFTYPE_P2P_DEVICE) |
547 BIT(NL80211_IFTYPE_ADHOC);
548
549 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
550 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
551 if (iwl_mvm_is_lar_supported(mvm))
552 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
553 else
554 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
555 REGULATORY_DISABLE_BEACON_HINTS;
556
557 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
558 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
559
560 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
561 hw->wiphy->n_iface_combinations =
562 ARRAY_SIZE(iwl_mvm_iface_combinations);
563
564 hw->wiphy->max_remain_on_channel_duration = 10000;
565 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
566 	/* we can compensate for an offset of up to 3 channels = 15 MHz */
567 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
568
569 /* Extract MAC address */
570 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
571 hw->wiphy->addresses = mvm->addresses;
572 hw->wiphy->n_addresses = 1;
573
574 /* Extract additional MAC addresses if available */
575 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
576 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
577
578 for (i = 1; i < num_mac; i++) {
579 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
580 ETH_ALEN);
581 mvm->addresses[i].addr[5]++;
582 hw->wiphy->n_addresses++;
583 }
584
585 iwl_mvm_reset_phy_ctxts(mvm);
586
587 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
588
589 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
590
591 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
592 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
593 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
594
595 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
596 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
597 else
598 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
599
600 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
601 hw->wiphy->bands[NL80211_BAND_2GHZ] =
602 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
603 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
604 hw->wiphy->bands[NL80211_BAND_5GHZ] =
605 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
606
607 if (fw_has_capa(&mvm->fw->ucode_capa,
608 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
609 fw_has_api(&mvm->fw->ucode_capa,
610 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
611 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
612 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
613 }
614
615 hw->wiphy->hw_version = mvm->trans->hw_id;
616
617 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
618 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
619 else
620 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
621
622 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
623 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
624 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
625 /* we create the 802.11 header and zero length SSID IE. */
626 hw->wiphy->max_sched_scan_ie_len =
627 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
628 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
629 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
630
631 /*
632 * the firmware uses u8 for num of iterations, but 0xff is saved for
633 * infinite loop, so the maximum number of iterations is actually 254.
634 */
635 hw->wiphy->max_sched_scan_plan_iterations = 254;
636
637 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
638 NL80211_FEATURE_LOW_PRIORITY_SCAN |
639 NL80211_FEATURE_P2P_GO_OPPPS |
640 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
641 NL80211_FEATURE_DYNAMIC_SMPS |
642 NL80211_FEATURE_STATIC_SMPS |
643 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
644
645 if (fw_has_capa(&mvm->fw->ucode_capa,
646 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
647 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
650 hw->wiphy->features |= NL80211_FEATURE_QUIET;
651
652 if (fw_has_capa(&mvm->fw->ucode_capa,
653 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
654 hw->wiphy->features |=
655 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
656
657 if (fw_has_capa(&mvm->fw->ucode_capa,
658 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
659 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
660
661 if (fw_has_api(&mvm->fw->ucode_capa,
662 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
663 wiphy_ext_feature_set(hw->wiphy,
664 NL80211_EXT_FEATURE_SCAN_START_TIME);
665 wiphy_ext_feature_set(hw->wiphy,
666 NL80211_EXT_FEATURE_BSS_PARENT_TSF);
667 wiphy_ext_feature_set(hw->wiphy,
668 NL80211_EXT_FEATURE_SET_SCAN_DWELL);
669 }
670
671 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
672
673 #ifdef CONFIG_PM_SLEEP
674 if (iwl_mvm_is_d0i3_supported(mvm) &&
675 device_can_wakeup(mvm->trans->dev)) {
676 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
677 hw->wiphy->wowlan = &mvm->wowlan;
678 }
679
680 if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
681 mvm->trans->ops->d3_suspend &&
682 mvm->trans->ops->d3_resume &&
683 device_can_wakeup(mvm->trans->dev)) {
684 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
685 WIPHY_WOWLAN_DISCONNECT |
686 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
687 WIPHY_WOWLAN_RFKILL_RELEASE |
688 WIPHY_WOWLAN_NET_DETECT;
689 if (!iwlwifi_mod_params.sw_crypto)
690 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
691 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
692 WIPHY_WOWLAN_4WAY_HANDSHAKE;
693
694 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
695 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
696 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
697 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
698 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
699 hw->wiphy->wowlan = &mvm->wowlan;
700 }
701 #endif
702
703 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
704 /* assign default bcast filtering configuration */
705 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
706 #endif
707
708 ret = iwl_mvm_leds_init(mvm);
709 if (ret)
710 return ret;
711
712 if (fw_has_capa(&mvm->fw->ucode_capa,
713 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
714 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
715 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
716 ieee80211_hw_set(hw, TDLS_WIDER_BW);
717 }
718
719 if (fw_has_capa(&mvm->fw->ucode_capa,
720 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
721 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
722 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
723 }
724
725 hw->netdev_features |= mvm->cfg->features;
726 if (!iwl_mvm_is_csum_supported(mvm)) {
727 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
728 NETIF_F_RXCSUM);
729 /* We may support SW TX CSUM */
730 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
731 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
732 }
733
734 ret = ieee80211_register_hw(mvm->hw);
735 if (ret)
736 iwl_mvm_leds_exit(mvm);
737
738 if (mvm->cfg->vht_mu_mimo_supported)
739 wiphy_ext_feature_set(hw->wiphy,
740 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
741
742 return ret;
743 }
744
745 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
746 struct ieee80211_sta *sta,
747 struct sk_buff *skb)
748 {
749 struct iwl_mvm_sta *mvmsta;
750 bool defer = false;
751
752 /*
753 * double check the IN_D0I3 flag both before and after
754 * taking the spinlock, in order to prevent taking
755 * the spinlock when not needed.
756 */
757 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
758 return false;
759
760 spin_lock(&mvm->d0i3_tx_lock);
761 /*
762 * testing the flag again ensures the skb dequeue
763 * loop (on d0i3 exit) hasn't run yet.
764 */
765 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
766 goto out;
767
768 mvmsta = iwl_mvm_sta_from_mac80211(sta);
769 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
770 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
771 goto out;
772
773 __skb_queue_tail(&mvm->d0i3_tx, skb);
774 ieee80211_stop_queues(mvm->hw);
775
776 /* trigger wakeup */
777 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
778 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
779
780 defer = true;
781 out:
782 spin_unlock(&mvm->d0i3_tx_lock);
783 return defer;
784 }
785
786 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
787 struct ieee80211_tx_control *control,
788 struct sk_buff *skb)
789 {
790 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
791 struct ieee80211_sta *sta = control->sta;
792 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
793 struct ieee80211_hdr *hdr = (void *)skb->data;
794
795 if (iwl_mvm_is_radio_killed(mvm)) {
796 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
797 goto drop;
798 }
799
800 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
801 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
802 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
803 goto drop;
804
805 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
806 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
807 ieee80211_is_mgmt(hdr->frame_control) &&
808 !ieee80211_is_deauth(hdr->frame_control) &&
809 !ieee80211_is_disassoc(hdr->frame_control) &&
810 !ieee80211_is_action(hdr->frame_control)))
811 sta = NULL;
812
813 if (sta) {
814 if (iwl_mvm_defer_tx(mvm, sta, skb))
815 return;
816 if (iwl_mvm_tx_skb(mvm, skb, sta))
817 goto drop;
818 return;
819 }
820
821 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
822 goto drop;
823 return;
824 drop:
825 ieee80211_free_txskb(hw, skb);
826 }
827
828 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
829 {
830 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
831 return false;
832 return true;
833 }
834
835 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
836 {
837 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
838 return false;
839 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
840 return true;
841
842 /* enabled by default */
843 return true;
844 }
845
846 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
847 do { \
848 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
849 break; \
850 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
851 } while (0)
852
853 static void
854 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
855 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
856 enum ieee80211_ampdu_mlme_action action)
857 {
858 struct iwl_fw_dbg_trigger_tlv *trig;
859 struct iwl_fw_dbg_trigger_ba *ba_trig;
860
861 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
862 return;
863
864 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
865 ba_trig = (void *)trig->data;
866
867 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
868 return;
869
870 switch (action) {
871 case IEEE80211_AMPDU_TX_OPERATIONAL: {
872 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
873 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
874
875 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
876 "TX AGG START: MAC %pM tid %d ssn %d\n",
877 sta->addr, tid, tid_data->ssn);
878 break;
879 }
880 case IEEE80211_AMPDU_TX_STOP_CONT:
881 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
882 "TX AGG STOP: MAC %pM tid %d\n",
883 sta->addr, tid);
884 break;
885 case IEEE80211_AMPDU_RX_START:
886 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
887 "RX AGG START: MAC %pM tid %d ssn %d\n",
888 sta->addr, tid, rx_ba_ssn);
889 break;
890 case IEEE80211_AMPDU_RX_STOP:
891 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
892 "RX AGG STOP: MAC %pM tid %d\n",
893 sta->addr, tid);
894 break;
895 default:
896 break;
897 }
898 }
899
900 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
901 struct ieee80211_vif *vif,
902 struct ieee80211_ampdu_params *params)
903 {
904 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
905 int ret;
906 bool tx_agg_ref = false;
907 struct ieee80211_sta *sta = params->sta;
908 enum ieee80211_ampdu_mlme_action action = params->action;
909 u16 tid = params->tid;
910 u16 *ssn = &params->ssn;
911 u8 buf_size = params->buf_size;
912 bool amsdu = params->amsdu;
913 u16 timeout = params->timeout;
914
915 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
916 sta->addr, tid, action);
917
918 if (!(mvm->nvm_data->sku_cap_11n_enable))
919 return -EACCES;
920
921 /* return from D0i3 before starting a new Tx aggregation */
922 switch (action) {
923 case IEEE80211_AMPDU_TX_START:
924 case IEEE80211_AMPDU_TX_STOP_CONT:
925 case IEEE80211_AMPDU_TX_STOP_FLUSH:
926 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
927 case IEEE80211_AMPDU_TX_OPERATIONAL:
928 /*
929 * for tx start, wait synchronously until D0i3 exit to
930 * get the correct sequence number for the tid.
931 * additionally, some other ampdu actions use direct
932 * target access, which is not handled automatically
933 * by the trans layer (unlike commands), so wait for
934 * d0i3 exit in these cases as well.
935 */
936 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
937 if (ret)
938 return ret;
939
940 tx_agg_ref = true;
941 break;
942 default:
943 break;
944 }
945
946 mutex_lock(&mvm->mutex);
947
948 switch (action) {
949 case IEEE80211_AMPDU_RX_START:
950 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
951 ret = -EINVAL;
952 break;
953 }
954 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
955 timeout);
956 break;
957 case IEEE80211_AMPDU_RX_STOP:
958 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
959 timeout);
960 break;
961 case IEEE80211_AMPDU_TX_START:
962 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
963 ret = -EINVAL;
964 break;
965 }
966 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
967 break;
968 case IEEE80211_AMPDU_TX_STOP_CONT:
969 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
970 break;
971 case IEEE80211_AMPDU_TX_STOP_FLUSH:
972 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
973 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
974 break;
975 case IEEE80211_AMPDU_TX_OPERATIONAL:
976 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
977 buf_size, amsdu);
978 break;
979 default:
980 WARN_ON_ONCE(1);
981 ret = -EINVAL;
982 break;
983 }
984
985 if (!ret) {
986 u16 rx_ba_ssn = 0;
987
988 if (action == IEEE80211_AMPDU_RX_START)
989 rx_ba_ssn = *ssn;
990
991 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
992 rx_ba_ssn, action);
993 }
994 mutex_unlock(&mvm->mutex);
995
996 /*
997 * If the tid is marked as started, we won't use it for offloaded
998 * traffic on the next D0i3 entry. It's safe to unref.
999 */
1000 if (tx_agg_ref)
1001 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
1002
1003 return ret;
1004 }
1005
1006 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
1007 struct ieee80211_vif *vif)
1008 {
1009 struct iwl_mvm *mvm = data;
1010 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1011
1012 mvmvif->uploaded = false;
1013 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1014
1015 spin_lock_bh(&mvm->time_event_lock);
1016 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
1017 spin_unlock_bh(&mvm->time_event_lock);
1018
1019 mvmvif->phy_ctxt = NULL;
1020 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
1021 }
1022
1023 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1024 {
1025 	/* clear the D3 reconfig flag; we only need it to avoid dumping a
1026 	 * firmware coredump on reconfiguration, and we shouldn't do that
1027 	 * on the D3->D0 transition
1028 */
1029 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1030 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1031 iwl_mvm_fw_error_dump(mvm);
1032 }
1033
1034 /* cleanup all stale references (scan, roc), but keep the
1035 * ucode_down ref until reconfig is complete
1036 */
1037 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1038
1039 iwl_mvm_stop_device(mvm);
1040
1041 mvm->scan_status = 0;
1042 mvm->ps_disabled = false;
1043 mvm->calibrating = false;
1044
1045 /* just in case one was running */
1046 iwl_mvm_cleanup_roc_te(mvm);
1047 ieee80211_remain_on_channel_expired(mvm->hw);
1048
1049 /*
1050 * cleanup all interfaces, even inactive ones, as some might have
1051 * gone down during the HW restart
1052 */
1053 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1054
1055 mvm->p2p_device_vif = NULL;
1056 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1057
1058 iwl_mvm_reset_phy_ctxts(mvm);
1059 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1060 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1061 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1062 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1063 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1064 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1065
1066 ieee80211_wake_queues(mvm->hw);
1067
1068 /* clear any stale d0i3 state */
1069 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1070
1071 mvm->vif_count = 0;
1072 mvm->rx_ba_sessions = 0;
1073 mvm->fw_dbg_conf = FW_DBG_INVALID;
1074
1075 /* keep statistics ticking */
1076 iwl_mvm_accu_radio_stats(mvm);
1077 }
1078
1079 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1080 {
1081 int ret;
1082
1083 lockdep_assert_held(&mvm->mutex);
1084
1085 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1086 /* Clean up some internal and mac80211 state on restart */
1087 iwl_mvm_restart_cleanup(mvm);
1088 } else {
1089 /* Hold the reference to prevent runtime suspend while
1090 * the start procedure runs. It's a bit confusing
1091 * that the UCODE_DOWN reference is taken, but it just
1092 		 * means "UCODE is not UP yet". (TODO: rename this
1093 * reference).
1094 */
1095 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1096 }
1097 ret = iwl_mvm_up(mvm);
1098
1099 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1100 /* Something went wrong - we need to finish some cleanup
1101 * that normally iwl_mvm_mac_restart_complete() below
1102 * would do.
1103 */
1104 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1105 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1106 }
1107
1108 return ret;
1109 }
1110
1111 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1112 {
1113 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1114 int ret;
1115
1116 /* Some hw restart cleanups must not hold the mutex */
1117 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1118 /*
1119 * Make sure we are out of d0i3. This is needed
1120 * to make sure the reference accounting is correct
1121 * (and there is no stale d0i3_exit_work).
1122 */
1123 wait_event_timeout(mvm->d0i3_exit_waitq,
1124 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1125 &mvm->status),
1126 HZ);
1127 }
1128
1129 mutex_lock(&mvm->mutex);
1130 ret = __iwl_mvm_mac_start(mvm);
1131 mutex_unlock(&mvm->mutex);
1132
1133 return ret;
1134 }
1135
1136 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1137 {
1138 int ret;
1139
1140 mutex_lock(&mvm->mutex);
1141
1142 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1143 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1144 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1145 if (ret)
1146 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1147 ret);
1148
1149 /* allow transport/FW low power modes */
1150 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1151
1152 /*
1153 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1154 * of packets the FW sent out, so we must reconnect.
1155 */
1156 iwl_mvm_teardown_tdls_peers(mvm);
1157
1158 mutex_unlock(&mvm->mutex);
1159 }
1160
1161 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1162 {
1163 if (iwl_mvm_is_d0i3_supported(mvm) &&
1164 iwl_mvm_enter_d0i3_on_suspend(mvm))
1165 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1166 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1167 &mvm->status),
1168 HZ),
1169 "D0i3 exit on resume timed out\n");
1170 }
1171
1172 static void
1173 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1174 enum ieee80211_reconfig_type reconfig_type)
1175 {
1176 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1177
1178 switch (reconfig_type) {
1179 case IEEE80211_RECONFIG_TYPE_RESTART:
1180 iwl_mvm_restart_complete(mvm);
1181 break;
1182 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1183 iwl_mvm_resume_complete(mvm);
1184 break;
1185 }
1186 }
1187
1188 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1189 {
1190 lockdep_assert_held(&mvm->mutex);
1191
1192 /* firmware counters are obviously reset now, but we shouldn't
1193 	 * track them only partially, so also clear the accu_radio_stats counters.
1194 */
1195 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1196
1197 /* async_handlers_wk is now blocked */
1198
1199 /*
1200 * The work item could be running or queued if the
1201 * ROC time event stops just as we get here.
1202 */
1203 flush_work(&mvm->roc_done_wk);
1204
1205 iwl_mvm_stop_device(mvm);
1206
1207 iwl_mvm_async_handlers_purge(mvm);
1208 /* async_handlers_list is empty and will stay empty: HW is stopped */
1209
1210 /* the fw is stopped, the aux sta is dead: clean up driver state */
1211 iwl_mvm_del_aux_sta(mvm);
1212
1213 /*
1214 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1215 * won't be called in this case).
1216 	 * But make sure to clean up interfaces that went down before or while
1217 	 * the HW restart was requested.
1218 */
1219 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1220 ieee80211_iterate_interfaces(mvm->hw, 0,
1221 iwl_mvm_cleanup_iterator, mvm);
1222
1223 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1224 * make sure there's nothing left there and warn if any is found.
1225 */
1226 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1227 int i;
1228
1229 for (i = 0; i < mvm->max_scans; i++) {
1230 if (WARN_ONCE(mvm->scan_uid_status[i],
1231 "UMAC scan UID %d status was not cleaned\n",
1232 i))
1233 mvm->scan_uid_status[i] = 0;
1234 }
1235 }
1236 }
1237
1238 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1239 {
1240 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1241
1242 flush_work(&mvm->d0i3_exit_work);
1243 flush_work(&mvm->async_handlers_wk);
1244 flush_work(&mvm->add_stream_wk);
1245 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1246 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1247 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1248 iwl_mvm_free_fw_dump_desc(mvm);
1249
1250 mutex_lock(&mvm->mutex);
1251 __iwl_mvm_mac_stop(mvm);
1252 mutex_unlock(&mvm->mutex);
1253
1254 /*
1255 * The worker might have been waiting for the mutex, let it run and
1256 * discover that its list is now empty.
1257 */
1258 cancel_work_sync(&mvm->async_handlers_wk);
1259 }
1260
1261 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1262 {
1263 u16 i;
1264
1265 lockdep_assert_held(&mvm->mutex);
1266
1267 for (i = 0; i < NUM_PHY_CTX; i++)
1268 if (!mvm->phy_ctxts[i].ref)
1269 return &mvm->phy_ctxts[i];
1270
1271 IWL_ERR(mvm, "No available PHY context\n");
1272 return NULL;
1273 }
1274
1275 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1276 s16 tx_power)
1277 {
1278 struct iwl_dev_tx_power_cmd cmd = {
1279 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1280 .v3.mac_context_id =
1281 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1282 .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
1283 };
1284 int len = sizeof(cmd);
1285
1286 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1287 cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1288
1289 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1290 len = sizeof(cmd.v3);
1291
1292 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1293 }
1294
1295 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1296 struct ieee80211_vif *vif)
1297 {
1298 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1299 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1300 int ret;
1301
1302 mvmvif->mvm = mvm;
1303
1304 /*
1305 * make sure D0i3 exit is completed, otherwise a target access
1306 * during tx queue configuration could be done when still in
1307 * D0i3 state.
1308 */
1309 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1310 if (ret)
1311 return ret;
1312
1313 /*
1314 * Not much to do here. The stack will not allow interface
1315 * types or combinations that we didn't advertise, so we
1316 * don't really have to check the types.
1317 */
1318
1319 mutex_lock(&mvm->mutex);
1320
1321 /* make sure that beacon statistics don't go backwards with FW reset */
1322 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1323 mvmvif->beacon_stats.accu_num_beacons +=
1324 mvmvif->beacon_stats.num_beacons;
1325
1326 /* Allocate resources for the MAC context, and add it to the fw */
1327 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1328 if (ret)
1329 goto out_unlock;
1330
1331 /* Counting number of interfaces is needed for legacy PM */
1332 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1333 mvm->vif_count++;
1334
1335 /*
1336 * The AP binding flow can be done only after the beacon
1337 * template is configured (which happens only in the mac80211
1338 * start_ap() flow), and adding the broadcast station can happen
1339 * only after the binding.
1340 * In addition, since modifying the MAC before adding a bcast
1341 * station is not allowed by the FW, delay the adding of MAC context to
1342 * the point where we can also add the bcast station.
1343 * In short: there's not much we can do at this point, other than
1344 * allocating resources :)
1345 */
1346 if (vif->type == NL80211_IFTYPE_AP ||
1347 vif->type == NL80211_IFTYPE_ADHOC) {
1348 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1349 if (ret) {
1350 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1351 goto out_release;
1352 }
1353
1354 iwl_mvm_vif_dbgfs_register(mvm, vif);
1355 goto out_unlock;
1356 }
1357
1358 mvmvif->features |= hw->netdev_features;
1359
1360 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1361 if (ret)
1362 goto out_release;
1363
1364 ret = iwl_mvm_power_update_mac(mvm);
1365 if (ret)
1366 goto out_remove_mac;
1367
1368 /* beacon filtering */
1369 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1370 if (ret)
1371 goto out_remove_mac;
1372
1373 if (!mvm->bf_allowed_vif &&
1374 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1375 mvm->bf_allowed_vif = mvmvif;
1376 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1377 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1378 }
1379
1380 /*
1381 * P2P_DEVICE interface does not have a channel context assigned to it,
1382 * so a dedicated PHY context is allocated to it and the corresponding
1383 * MAC context is bound to it at this stage.
1384 */
1385 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1386
1387 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1388 if (!mvmvif->phy_ctxt) {
1389 ret = -ENOSPC;
1390 goto out_free_bf;
1391 }
1392
1393 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1394 ret = iwl_mvm_binding_add_vif(mvm, vif);
1395 if (ret)
1396 goto out_unref_phy;
1397
1398 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1399 if (ret)
1400 goto out_unbind;
1401
1402 /* Save a pointer to p2p device vif, so it can later be used to
1403 * update the p2p device MAC when a GO is started/stopped */
1404 mvm->p2p_device_vif = vif;
1405 }
1406
1407 iwl_mvm_vif_dbgfs_register(mvm, vif);
1408 goto out_unlock;
1409
1410 out_unbind:
1411 iwl_mvm_binding_remove_vif(mvm, vif);
1412 out_unref_phy:
1413 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1414 out_free_bf:
1415 if (mvm->bf_allowed_vif == mvmvif) {
1416 mvm->bf_allowed_vif = NULL;
1417 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1418 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1419 }
1420 out_remove_mac:
1421 mvmvif->phy_ctxt = NULL;
1422 iwl_mvm_mac_ctxt_remove(mvm, vif);
1423 out_release:
1424 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1425 mvm->vif_count--;
1426
1427 iwl_mvm_mac_ctxt_release(mvm, vif);
1428 out_unlock:
1429 mutex_unlock(&mvm->mutex);
1430
1431 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1432
1433 return ret;
1434 }
1435
1436 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1437 struct ieee80211_vif *vif)
1438 {
1439 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1440
1441 if (tfd_msk) {
1442 /*
1443 * mac80211 first removes all the stations of the vif and
1444 * then removes the vif. When it removes a station it also
1445 * flushes the AMPDU session. So by now, all the AMPDU sessions
1446 * of all the stations of this vif are closed, and the queues
1447 * of these AMPDU sessions are properly closed.
1448 * We still need to take care of the shared queues of the vif.
1449 * Flush them here.
1450 */
1451 mutex_lock(&mvm->mutex);
1452 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1453 mutex_unlock(&mvm->mutex);
1454
1455 /*
1456 * There are transports that buffer a few frames in the host.
1457 * For these, the flush above isn't enough since while we were
1458 * flushing, the transport might have sent more frames to the
1459 * device. To solve this, wait here until the transport is
1460 * empty. Technically, this could have replaced the flush
1461 * above, but flush is much faster than draining. So flush
1462 * first, and drain to make sure we have no frames in the
1463 * transport anymore.
1464 * If a station still had frames on the shared queues, it is
1465 * already marked as draining, so to complete the draining, we
1466 * just need to wait until the transport is empty.
1467 */
1468 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1469 }
1470
1471 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1472 /*
1473 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1474 * We assume here that all the packets sent to the OFFCHANNEL
1475 * queue are sent in ROC session.
1476 */
1477 flush_work(&mvm->roc_done_wk);
1478 } else {
1479 /*
1480 * By now, all the AC queues are empty. The AGG queues are
1481 * empty too. We already got all the Tx responses for all the
1482 * packets in the queues. The drain work can have been
1483 * triggered. Flush it.
1484 */
1485 flush_work(&mvm->sta_drained_wk);
1486 }
1487 }
1488
1489 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1490 struct ieee80211_vif *vif)
1491 {
1492 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1493 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1494
1495 iwl_mvm_prepare_mac_removal(mvm, vif);
1496
1497 mutex_lock(&mvm->mutex);
1498
1499 if (mvm->bf_allowed_vif == mvmvif) {
1500 mvm->bf_allowed_vif = NULL;
1501 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1502 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1503 }
1504
1505 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1506
1507 /*
1508 * For AP/GO interface, the tear down of the resources allocated to the
1509 	 * interface is handled as part of the stop_ap flow.
1510 */
1511 if (vif->type == NL80211_IFTYPE_AP ||
1512 vif->type == NL80211_IFTYPE_ADHOC) {
1513 #ifdef CONFIG_NL80211_TESTMODE
1514 if (vif == mvm->noa_vif) {
1515 mvm->noa_vif = NULL;
1516 mvm->noa_duration = 0;
1517 }
1518 #endif
1519 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1520 goto out_release;
1521 }
1522
1523 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1524 mvm->p2p_device_vif = NULL;
1525 iwl_mvm_rm_bcast_sta(mvm, vif);
1526 iwl_mvm_binding_remove_vif(mvm, vif);
1527 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1528 mvmvif->phy_ctxt = NULL;
1529 }
1530
1531 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1532 mvm->vif_count--;
1533
1534 iwl_mvm_power_update_mac(mvm);
1535 iwl_mvm_mac_ctxt_remove(mvm, vif);
1536
1537 out_release:
1538 iwl_mvm_mac_ctxt_release(mvm, vif);
1539 mutex_unlock(&mvm->mutex);
1540 }
1541
1542 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1543 {
1544 return 0;
1545 }
1546
1547 struct iwl_mvm_mc_iter_data {
1548 struct iwl_mvm *mvm;
1549 int port_id;
1550 };
1551
1552 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1553 struct ieee80211_vif *vif)
1554 {
1555 struct iwl_mvm_mc_iter_data *data = _data;
1556 struct iwl_mvm *mvm = data->mvm;
1557 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1558 int ret, len;
1559
1560 /* if we don't have free ports, mcast frames will be dropped */
1561 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1562 return;
1563
1564 if (vif->type != NL80211_IFTYPE_STATION ||
1565 !vif->bss_conf.assoc)
1566 return;
1567
1568 cmd->port_id = data->port_id++;
1569 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1570 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1571
1572 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1573 if (ret)
1574 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1575 }
1576
1577 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1578 {
1579 struct iwl_mvm_mc_iter_data iter_data = {
1580 .mvm = mvm,
1581 };
1582
1583 lockdep_assert_held(&mvm->mutex);
1584
1585 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1586 return;
1587
1588 ieee80211_iterate_active_interfaces_atomic(
1589 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1590 iwl_mvm_mc_iface_iterator, &iter_data);
1591 }
1592
1593 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1594 struct netdev_hw_addr_list *mc_list)
1595 {
1596 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1597 struct iwl_mcast_filter_cmd *cmd;
1598 struct netdev_hw_addr *addr;
1599 int addr_count;
1600 bool pass_all;
1601 int len;
1602
1603 addr_count = netdev_hw_addr_list_count(mc_list);
1604 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1605 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1606 if (pass_all)
1607 addr_count = 0;
1608
1609 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1610 cmd = kzalloc(len, GFP_ATOMIC);
1611 if (!cmd)
1612 return 0;
1613
1614 if (pass_all) {
1615 cmd->pass_all = 1;
1616 return (u64)(unsigned long)cmd;
1617 }
1618
1619 netdev_hw_addr_list_for_each(addr, mc_list) {
1620 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1621 cmd->count, addr->addr);
1622 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1623 addr->addr, ETH_ALEN);
1624 cmd->count++;
1625 }
1626
1627 return (u64)(unsigned long)cmd;
1628 }
1629
1630 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1631 unsigned int changed_flags,
1632 unsigned int *total_flags,
1633 u64 multicast)
1634 {
1635 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1636 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1637
1638 mutex_lock(&mvm->mutex);
1639
1640 /* replace previous configuration */
1641 kfree(mvm->mcast_filter_cmd);
1642 mvm->mcast_filter_cmd = cmd;
1643
1644 if (!cmd)
1645 goto out;
1646
1647 iwl_mvm_recalc_multicast(mvm);
1648 out:
1649 mutex_unlock(&mvm->mutex);
1650 *total_flags = 0;
1651 }
1652
1653 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1654 struct ieee80211_vif *vif,
1655 unsigned int filter_flags,
1656 unsigned int changed_flags)
1657 {
1658 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1659
1660 /* We support only filter for probe requests */
1661 if (!(changed_flags & FIF_PROBE_REQ))
1662 return;
1663
1664 /* Supported only for p2p client interfaces */
1665 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1666 !vif->p2p)
1667 return;
1668
1669 mutex_lock(&mvm->mutex);
1670 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1671 mutex_unlock(&mvm->mutex);
1672 }
1673
1674 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1675 struct iwl_bcast_iter_data {
1676 struct iwl_mvm *mvm;
1677 struct iwl_bcast_filter_cmd *cmd;
1678 u8 current_filter;
1679 };
1680
1681 static void
1682 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1683 const struct iwl_fw_bcast_filter *in_filter,
1684 struct iwl_fw_bcast_filter *out_filter)
1685 {
1686 struct iwl_fw_bcast_filter_attr *attr;
1687 int i;
1688
1689 memcpy(out_filter, in_filter, sizeof(*out_filter));
1690
1691 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1692 attr = &out_filter->attrs[i];
1693
1694 if (!attr->mask)
1695 break;
1696
1697 switch (attr->reserved1) {
1698 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1699 if (vif->bss_conf.arp_addr_cnt != 1) {
1700 attr->mask = 0;
1701 continue;
1702 }
1703
1704 attr->val = vif->bss_conf.arp_addr_list[0];
1705 break;
1706 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1707 attr->val = *(__be32 *)&vif->addr[2];
1708 break;
1709 default:
1710 break;
1711 }
1712 attr->reserved1 = 0;
1713 out_filter->num_attrs++;
1714 }
1715 }
1716
1717 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1718 struct ieee80211_vif *vif)
1719 {
1720 struct iwl_bcast_iter_data *data = _data;
1721 struct iwl_mvm *mvm = data->mvm;
1722 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1723 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1724 struct iwl_fw_bcast_mac *bcast_mac;
1725 int i;
1726
1727 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1728 return;
1729
1730 bcast_mac = &cmd->macs[mvmvif->id];
1731
1732 /*
1733 * enable filtering only for associated stations, but not for P2P
1734 * Clients
1735 */
1736 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1737 !vif->bss_conf.assoc)
1738 return;
1739
1740 bcast_mac->default_discard = 1;
1741
1742 /* copy all configured filters */
1743 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1744 /*
1745 * Make sure we don't exceed our filters limit.
1746 * if there is still a valid filter to be configured,
1747 * be on the safe side and just allow bcast for this mac.
1748 */
1749 if (WARN_ON_ONCE(data->current_filter >=
1750 ARRAY_SIZE(cmd->filters))) {
1751 bcast_mac->default_discard = 0;
1752 bcast_mac->attached_filters = 0;
1753 break;
1754 }
1755
1756 iwl_mvm_set_bcast_filter(vif,
1757 &mvm->bcast_filters[i],
1758 &cmd->filters[data->current_filter]);
1759
1760 /* skip current filter if it contains no attributes */
1761 if (!cmd->filters[data->current_filter].num_attrs)
1762 continue;
1763
1764 /* attach the filter to current mac */
1765 bcast_mac->attached_filters |=
1766 cpu_to_le16(BIT(data->current_filter));
1767
1768 data->current_filter++;
1769 }
1770 }
1771
1772 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1773 struct iwl_bcast_filter_cmd *cmd)
1774 {
1775 struct iwl_bcast_iter_data iter_data = {
1776 .mvm = mvm,
1777 .cmd = cmd,
1778 };
1779
1780 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1781 return false;
1782
1783 memset(cmd, 0, sizeof(*cmd));
1784 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1785 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1786
1787 #ifdef CONFIG_IWLWIFI_DEBUGFS
1788 /* use debugfs filters/macs if override is configured */
1789 if (mvm->dbgfs_bcast_filtering.override) {
1790 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1791 sizeof(cmd->filters));
1792 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1793 sizeof(cmd->macs));
1794 return true;
1795 }
1796 #endif
1797
1798 /* if no filters are configured, do nothing */
1799 if (!mvm->bcast_filters)
1800 return false;
1801
1802 /* configure and attach these filters for each associated sta vif */
1803 ieee80211_iterate_active_interfaces(
1804 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1805 iwl_mvm_bcast_filter_iterator, &iter_data);
1806
1807 return true;
1808 }
1809
1810 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1811 {
1812 struct iwl_bcast_filter_cmd cmd;
1813
1814 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1815 return 0;
1816
1817 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1818 return 0;
1819
1820 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1821 sizeof(cmd), &cmd);
1822 }
1823 #else
1824 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1825 {
1826 return 0;
1827 }
1828 #endif
1829
1830 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1831 struct ieee80211_vif *vif)
1832 {
1833 struct iwl_mu_group_mgmt_cmd cmd = {};
1834
1835 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1836 WLAN_MEMBERSHIP_LEN);
1837 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1838 WLAN_USER_POSITION_LEN);
1839
1840 return iwl_mvm_send_cmd_pdu(mvm,
1841 WIDE_ID(DATA_PATH_GROUP,
1842 UPDATE_MU_GROUPS_CMD),
1843 0, sizeof(cmd), &cmd);
1844 }
1845
1846 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1847 struct ieee80211_vif *vif)
1848 {
1849 if (vif->mu_mimo_owner) {
1850 struct iwl_mu_group_mgmt_notif *notif = _data;
1851
1852 /*
1853 * MU-MIMO Group Id action frame is little endian. We treat
1854 * the data received from firmware as if it came from the
1855 * action frame, so no conversion is needed.
1856 */
1857 ieee80211_update_mu_groups(vif,
1858 (u8 *)&notif->membership_status,
1859 (u8 *)&notif->user_position);
1860 }
1861 }
1862
1863 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1864 struct iwl_rx_cmd_buffer *rxb)
1865 {
1866 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1867 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1868
1869 ieee80211_iterate_active_interfaces_atomic(
1870 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1871 iwl_mvm_mu_mimo_iface_iterator, notif);
1872 }
1873
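/*
 * Handle BSS_CHANGED_* notifications for a station (managed) interface:
 * update the MAC context and BSSID, handle association/disassociation
 * (quotas, SF state, AP station removal), restore MU-MIMO group data
 * after an HW restart, and react to beacon, power, TX power, CQM and
 * ARP filter changes.
 */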
1874 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1875 struct ieee80211_vif *vif,
1876 struct ieee80211_bss_conf *bss_conf,
1877 u32 changes)
1878 {
1879 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1880 int ret;
1881
1882 /*
1883 * Re-calculate the tsf id, as the master-slave relations depend on the
1884 * beacon interval, which was not known when the station interface was
1885 * added.
1886 */
1887 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1888 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1889
1890 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1891 mvmvif->lqm_active)
1892 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1893 0, 0);
1894
1895 /*
1896 * If we're not associated yet, take the (new) BSSID before associating
1897 * so the firmware knows. If we're already associated, then use the old
1898 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
1899 * branch for disassociation below.
1900 */
1901 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
1902 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1903
1904 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
1905 if (ret)
1906 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1907
1908 /* after sending it once, adopt mac80211 data */
1909 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1910 mvmvif->associated = bss_conf->assoc;
1911
1912 if (changes & BSS_CHANGED_ASSOC) {
1913 if (bss_conf->assoc) {
1914 /* clear statistics to get clean beacon counter */
1915 iwl_mvm_request_statistics(mvm, true);
1916 memset(&mvmvif->beacon_stats, 0,
1917 sizeof(mvmvif->beacon_stats));
1918
1919 /* add quota for this interface */
1920 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1921 if (ret) {
1922 IWL_ERR(mvm, "failed to update quotas\n");
1923 return;
1924 }
1925
1926 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1927 &mvm->status)) {
1928 /*
1929 * If we're restarting then the firmware will
1930 * obviously have lost synchronisation with
1931 * the AP. It will attempt to synchronise by
1932 * itself, but we can make it more reliable by
1933 * scheduling a session protection time event.
1934 *
1935 * The firmware needs to receive a beacon to
1936 * catch up with synchronisation, use 110% of
1937 * the beacon interval.
1938 *
1939 * Set a large maximum delay to allow for more
1940 * than a single interface.
1941 */
1942 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
1943 iwl_mvm_protect_session(mvm, vif, dur, dur,
1944 5 * dur, false);
1945 }
1946
1947 iwl_mvm_sf_update(mvm, vif, false);
1948 iwl_mvm_power_vif_assoc(mvm, vif);
1949 if (vif->p2p) {
1950 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
1951 iwl_mvm_update_smps(mvm, vif,
1952 IWL_MVM_SMPS_REQ_PROT,
1953 IEEE80211_SMPS_DYNAMIC);
1954 }
1955 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1956 /*
1957 * If update fails - SF might be running in associated
1958 * mode while disassociated - which is forbidden.
1959 */
1960 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
1961 "Failed to update SF upon disassociation\n");
1962
1963 /* remove AP station now that the MAC is unassoc */
1964 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
1965 if (ret)
1966 IWL_ERR(mvm, "failed to remove AP station\n");
1967
1968 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1969 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1970 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1971 /* remove quota for this interface */
1972 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1973 if (ret)
1974 IWL_ERR(mvm, "failed to update quotas\n");
1975
1976 if (vif->p2p)
1977 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
1978
1979 /* this will take the cleared BSSID from bss_conf */
1980 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1981 if (ret)
1982 IWL_ERR(mvm,
1983 "failed to update MAC %pM (clear after unassoc)\n",
1984 vif->addr);
1985 }
1986
1987 /*
1988 * The firmware tracks the MU-MIMO group on its own.
1989 * However, on HW restart we should restore this data.
1990 */
1991 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1992 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1993 ret = iwl_mvm_update_mu_groups(mvm, vif);
1994 if (ret)
1995 IWL_ERR(mvm,
1996 "failed to update VHT MU_MIMO groups\n");
1997 }
1998
1999 iwl_mvm_recalc_multicast(mvm);
2000 iwl_mvm_configure_bcast_filter(mvm);
2001
2002 /* reset rssi values */
2003 mvmvif->bf_data.ave_beacon_signal = 0;
2004
2005 iwl_mvm_bt_coex_vif_change(mvm);
2006 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2007 IEEE80211_SMPS_AUTOMATIC);
2008 if (fw_has_capa(&mvm->fw->ucode_capa,
2009 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2010 iwl_mvm_config_scan(mvm);
2011 }
2012
2013 if (changes & BSS_CHANGED_BEACON_INFO) {
2014 /*
2015 * We received a beacon from the associated AP so
2016 * remove the session protection.
2017 */
2018 iwl_mvm_remove_time_event(mvm, mvmvif,
2019 &mvmvif->time_event_data);
2020
2021 iwl_mvm_sf_update(mvm, vif, false);
2022 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2023 }
2024
2025 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2026 /*
2027 * Send power command on every beacon change,
2028 * because we may not have enabled beacon abort yet.
2029 */
2030 BSS_CHANGED_BEACON_INFO)) {
2031 ret = iwl_mvm_power_update_mac(mvm);
2032 if (ret)
2033 IWL_ERR(mvm, "failed to update power mode\n");
2034 }
2035
2036 if (changes & BSS_CHANGED_TXPOWER) {
2037 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2038 bss_conf->txpower);
2039 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2040 }
2041
2042 if (changes & BSS_CHANGED_CQM) {
2043 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2044 /* reset cqm events tracking */
2045 mvmvif->bf_data.last_cqm_event = 0;
2046 if (mvmvif->bf_data.bf_enabled) {
2047 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2048 if (ret)
2049 IWL_ERR(mvm,
2050 "failed to update CQM thresholds\n");
2051 }
2052 }
2053
2054 if (changes & BSS_CHANGED_ARP_FILTER) {
2055 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2056 iwl_mvm_configure_bcast_filter(mvm);
2057 }
2058 }
2059
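/*
 * Start AP/IBSS operation: upload the beacon template, add the MAC
 * context, bind it to a PHY context and add the broadcast station,
 * then update power and quotas. On failure, the steps already
 * performed are rolled back in reverse order.
 */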
2060 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2061 struct ieee80211_vif *vif)
2062 {
2063 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2064 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2065 int ret;
2066
2067 /*
2068 * iwl_mvm_mac_ctxt_add() might read directly from the device
2069 * (the system time), so make sure it is available.
2070 */
2071 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2072 if (ret)
2073 return ret;
2074
2075 mutex_lock(&mvm->mutex);
2076
2077 /* Send the beacon template */
2078 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2079 if (ret)
2080 goto out_unlock;
2081
2082 /*
2083 * Re-calculate the tsf id, as the master-slave relations depend on the
2084 * beacon interval, which was not known when the AP interface was added.
2085 */
2086 if (vif->type == NL80211_IFTYPE_AP)
2087 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2088
2089 mvmvif->ap_assoc_sta_count = 0;
2090
2091 /* Add the mac context */
2092 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2093 if (ret)
2094 goto out_unlock;
2095
2096 /* Perform the binding */
2097 ret = iwl_mvm_binding_add_vif(mvm, vif);
2098 if (ret)
2099 goto out_remove;
2100
2101 /* Send the bcast station. At this stage the TBTT and DTIM time events
2102 * are added and applied to the scheduler */
2103 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2104 if (ret)
2105 goto out_unbind;
2106
2107 /* must be set before quota calculations */
2108 mvmvif->ap_ibss_active = true;
2109
2110 /* power update needs to be done before quotas */
2111 iwl_mvm_power_update_mac(mvm);
2112
2113 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2114 if (ret)
2115 goto out_quota_failed;
2116
2117 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2118 if (vif->p2p && mvm->p2p_device_vif)
2119 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2120
2121 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2122
2123 iwl_mvm_bt_coex_vif_change(mvm);
2124
2125 /* we don't support TDLS during DCM */
2126 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2127 iwl_mvm_teardown_tdls_peers(mvm);
2128
2129 goto out_unlock;
2130
2131 out_quota_failed:
2132 iwl_mvm_power_update_mac(mvm);
2133 mvmvif->ap_ibss_active = false;
2134 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2135 out_unbind:
2136 iwl_mvm_binding_remove_vif(mvm, vif);
2137 out_remove:
2138 iwl_mvm_mac_ctxt_remove(mvm, vif);
2139 out_unlock:
2140 mutex_unlock(&mvm->mutex);
2141 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2142 return ret;
2143 }
2144
2145 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2146 struct ieee80211_vif *vif)
2147 {
2148 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2149 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2150
2151 iwl_mvm_prepare_mac_removal(mvm, vif);
2152
2153 mutex_lock(&mvm->mutex);
2154
2155 /* Handle AP stop while in CSA */
2156 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2157 iwl_mvm_remove_time_event(mvm, mvmvif,
2158 &mvmvif->time_event_data);
2159 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2160 mvmvif->csa_countdown = false;
2161 }
2162
2163 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2164 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2165 mvm->csa_tx_block_bcn_timeout = 0;
2166 }
2167
2168 mvmvif->ap_ibss_active = false;
2169 mvm->ap_last_beacon_gp2 = 0;
2170
2171 iwl_mvm_bt_coex_vif_change(mvm);
2172
2173 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2174
2175 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2176 if (vif->p2p && mvm->p2p_device_vif)
2177 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2178
2179 iwl_mvm_update_quotas(mvm, false, NULL);
2180 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2181 iwl_mvm_binding_remove_vif(mvm, vif);
2182
2183 iwl_mvm_power_update_mac(mvm);
2184
2185 iwl_mvm_mac_ctxt_remove(mvm, vif);
2186
2187 mutex_unlock(&mvm->mutex);
2188 }
2189
2190 static void
2191 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2192 struct ieee80211_vif *vif,
2193 struct ieee80211_bss_conf *bss_conf,
2194 u32 changes)
2195 {
2196 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2197
2198 /* Changes will be applied when the AP/IBSS is started */
2199 if (!mvmvif->ap_ibss_active)
2200 return;
2201
2202 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2203 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2204 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2205 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2206
2207 /* Need to send a new beacon template to the FW */
2208 if (changes & BSS_CHANGED_BEACON &&
2209 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2210 IWL_WARN(mvm, "Failed updating beacon data\n");
2211
2212 if (changes & BSS_CHANGED_TXPOWER) {
2213 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2214 bss_conf->txpower);
2215 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2216 }
2217 }
2218
2219 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2220 struct ieee80211_vif *vif,
2221 struct ieee80211_bss_conf *bss_conf,
2222 u32 changes)
2223 {
2224 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2225
2226 /*
2227 * iwl_mvm_bss_info_changed_station() might call
2228 * iwl_mvm_protect_session(), which reads directly from
2229 * the device (the system time), so make sure it is available.
2230 */
2231 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2232 return;
2233
2234 mutex_lock(&mvm->mutex);
2235
2236 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2237 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2238
2239 switch (vif->type) {
2240 case NL80211_IFTYPE_STATION:
2241 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2242 break;
2243 case NL80211_IFTYPE_AP:
2244 case NL80211_IFTYPE_ADHOC:
2245 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2246 break;
2247 case NL80211_IFTYPE_MONITOR:
2248 if (changes & BSS_CHANGED_MU_GROUPS)
2249 iwl_mvm_update_mu_groups(mvm, vif);
2250 break;
2251 default:
2252 /* shouldn't happen */
2253 WARN_ON_ONCE(1);
2254 }
2255
2256 mutex_unlock(&mvm->mutex);
2257 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2258 }
2259
2260 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2261 struct ieee80211_vif *vif,
2262 struct ieee80211_scan_request *hw_req)
2263 {
2264 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2265 int ret;
2266
2267 if (hw_req->req.n_channels == 0 ||
2268 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2269 return -EINVAL;
2270
2271 mutex_lock(&mvm->mutex);
2272 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2273 mutex_unlock(&mvm->mutex);
2274
2275 return ret;
2276 }
2277
2278 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2279 struct ieee80211_vif *vif)
2280 {
2281 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2282
2283 mutex_lock(&mvm->mutex);
2284
2285 /* Due to a race condition, it's possible that mac80211 asks
2286 * us to stop a hw_scan when it's already stopped. This can
2287 * happen, for instance, if we stopped the scan ourselves,
2288 * called ieee80211_scan_completed() and the userspace called
2289 * cancel scan before ieee80211_scan_work() could run.
2290 * To handle that, simply return if the scan is not running.
2291 */
2292 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2293 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2294
2295 mutex_unlock(&mvm->mutex);
2296 }
2297
2298 static void
2299 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2300 struct ieee80211_sta *sta, u16 tids,
2301 int num_frames,
2302 enum ieee80211_frame_release_type reason,
2303 bool more_data)
2304 {
2305 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2306
2307 /* Called when we need to transmit (a) frame(s) from mac80211 */
2308
2309 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2310 tids, more_data, false);
2311 }
2312
2313 static void
2314 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2315 struct ieee80211_sta *sta, u16 tids,
2316 int num_frames,
2317 enum ieee80211_frame_release_type reason,
2318 bool more_data)
2319 {
2320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2321
2322 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
2323
2324 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2325 tids, more_data, true);
2326 }
2327
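/*
 * Common handler for station sleep/awake transitions: collect the TX
 * queues and TIDs that still have frames queued, block/unblock the
 * station in mac80211 and freeze/unfreeze the TX queue timers
 * accordingly.
 */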
2328 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2329 enum sta_notify_cmd cmd,
2330 struct ieee80211_sta *sta)
2331 {
2332 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2333 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2334 unsigned long txqs = 0, tids = 0;
2335 int tid;
2336
2337 spin_lock_bh(&mvmsta->lock);
2338 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2339 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2340
2341 if (!iwl_mvm_is_dqa_supported(mvm) &&
2342 tid_data->state != IWL_AGG_ON &&
2343 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2344 continue;
2345
2346 __set_bit(tid_data->txq_id, &txqs);
2347
2348 if (iwl_mvm_tid_queued(tid_data) == 0)
2349 continue;
2350
2351 __set_bit(tid, &tids);
2352 }
2353
2354 switch (cmd) {
2355 case STA_NOTIFY_SLEEP:
2356 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2357 ieee80211_sta_block_awake(hw, sta, true);
2358
2359 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2360 ieee80211_sta_set_buffered(sta, tid, true);
2361
2362 if (txqs)
2363 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2364 /*
2365 * The fw updates the STA to be asleep. Tx packets on the Tx
2366 * queues to this station will not be transmitted. The fw will
2367 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2368 */
2369 break;
2370 case STA_NOTIFY_AWAKE:
2371 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2372 break;
2373
2374 if (txqs)
2375 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2376 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2377 break;
2378 default:
2379 break;
2380 }
2381 spin_unlock_bh(&mvmsta->lock);
2382 }
2383
2384 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2385 struct ieee80211_vif *vif,
2386 enum sta_notify_cmd cmd,
2387 struct ieee80211_sta *sta)
2388 {
2389 __iwl_mvm_mac_sta_notify(hw, cmd, sta);
2390 }
2391
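/*
 * Firmware PM state notification for a station connected to one of our
 * AP interfaces: mirror the sleep/awake state into mac80211 and handle
 * U-APSD trigger and PS-Poll events while the station is asleep.
 */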
2392 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2393 {
2394 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2395 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
2396 struct ieee80211_sta *sta;
2397 struct iwl_mvm_sta *mvmsta;
2398 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
2399
2400 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
2401 return;
2402
2403 rcu_read_lock();
2404 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
2405 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2406 rcu_read_unlock();
2407 return;
2408 }
2409
2410 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2411
2412 if (!mvmsta->vif ||
2413 mvmsta->vif->type != NL80211_IFTYPE_AP) {
2414 rcu_read_unlock();
2415 return;
2416 }
2417
2418 if (mvmsta->sleeping != sleeping) {
2419 mvmsta->sleeping = sleeping;
2420 __iwl_mvm_mac_sta_notify(mvm->hw,
2421 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
2422 sta);
2423 ieee80211_sta_ps_transition(sta, sleeping);
2424 }
2425
2426 if (sleeping) {
2427 switch (notif->type) {
2428 case IWL_MVM_PM_EVENT_AWAKE:
2429 case IWL_MVM_PM_EVENT_ASLEEP:
2430 break;
2431 case IWL_MVM_PM_EVENT_UAPSD:
2432 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
2433 break;
2434 case IWL_MVM_PM_EVENT_PS_POLL:
2435 ieee80211_sta_pspoll(sta);
2436 break;
2437 default:
2438 break;
2439 }
2440 }
2441
2442 rcu_read_unlock();
2443 }
2444
2445 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2446 struct ieee80211_vif *vif,
2447 struct ieee80211_sta *sta)
2448 {
2449 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2450 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2451
2452 /*
2453 * This is called before mac80211 does RCU synchronisation,
2454 * so here we already invalidate our internal RCU-protected
2455 * station pointer. The rest of the code will thus no longer
2456 * be able to find the station this way, and we don't rely
2457 * on further RCU synchronisation after the sta_state()
2458 * callback deleted the station.
2459 */
2460 mutex_lock(&mvm->mutex);
2461 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2462 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2463 ERR_PTR(-ENOENT));
2464
2465 mutex_unlock(&mvm->mutex);
2466 }
2467
2468 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2469 const u8 *bssid)
2470 {
2471 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2472 return;
2473
2474 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2475 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2476 return;
2477 }
2478
2479 if (!vif->p2p &&
2480 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2481 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2482 return;
2483 }
2484
2485 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2486 }
2487
2488 static void
2489 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2490 struct ieee80211_vif *vif, u8 *peer_addr,
2491 enum nl80211_tdls_operation action)
2492 {
2493 struct iwl_fw_dbg_trigger_tlv *trig;
2494 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2495
2496 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2497 return;
2498
2499 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2500 tdls_trig = (void *)trig->data;
2501 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
2502 return;
2503
2504 if (!(tdls_trig->action_bitmap & BIT(action)))
2505 return;
2506
2507 if (tdls_trig->peer_mode &&
2508 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2509 return;
2510
2511 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
2512 "TDLS event occurred, peer %pM, action %d",
2513 peer_addr, action);
2514 }
2515
2516 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2517 struct iwl_mvm_sta *mvm_sta)
2518 {
2519 struct iwl_mvm_tid_data *tid_data;
2520 struct sk_buff *skb;
2521 int i;
2522
2523 spin_lock_bh(&mvm_sta->lock);
2524 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2525 tid_data = &mvm_sta->tid_data[i];
2526 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2527 ieee80211_free_txskb(mvm->hw, skb);
2528 }
2529 spin_unlock_bh(&mvm_sta->lock);
2530 }
2531
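/*
 * mac80211 station state machine callback. Adds/updates/removes the
 * firmware station as the state moves between NOTEXIST, NONE, AUTH,
 * ASSOC and AUTHORIZED, toggles beacon filtering around the AUTHORIZED
 * state, and handles TDLS-specific bookkeeping (peer count, FW TID
 * reservation, debug triggers).
 */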
2532 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2533 struct ieee80211_vif *vif,
2534 struct ieee80211_sta *sta,
2535 enum ieee80211_sta_state old_state,
2536 enum ieee80211_sta_state new_state)
2537 {
2538 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2539 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2540 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2541 int ret;
2542
2543 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2544 sta->addr, old_state, new_state);
2545
2546 /* this would be a mac80211 bug ... but don't crash */
2547 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2548 return -EINVAL;
2549
2550 /* if a STA is being removed, reuse its ID */
2551 flush_work(&mvm->sta_drained_wk);
2552
2553 /*
2554 * If we are in a STA removal flow and in DQA mode:
2555 *
2556 * This is after the sync_rcu part, so the queues have already been
2557 * flushed. No more TXs on their way in mac80211's path, and no more in
2558 * the queues.
2559 * Also, we won't be getting any new TX frames for this station.
2560 * What we might have are deferred TX frames that need to be taken care
2561 * of.
2562 *
2563 * Drop any still-queued deferred-frame before removing the STA, and
2564 * make sure the worker is no longer handling frames for this STA.
2565 */
2566 if (old_state == IEEE80211_STA_NONE &&
2567 new_state == IEEE80211_STA_NOTEXIST &&
2568 iwl_mvm_is_dqa_supported(mvm)) {
2569 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2570 flush_work(&mvm->add_stream_wk);
2571
2572 /*
2573 * No need to make sure deferred TX indication is off since the
2574 * worker will already remove it if it was on
2575 */
2576 }
2577
2578 mutex_lock(&mvm->mutex);
2579 /* track whether or not the station is associated */
2580 mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC;
2581
2582 if (old_state == IEEE80211_STA_NOTEXIST &&
2583 new_state == IEEE80211_STA_NONE) {
2584 /*
2585 * Firmware bug - it'll crash if the beacon interval is less
2586 * than 16. We can't avoid connecting at all, so refuse the
2587 * station state change; this will cause mac80211 to abandon
2588 * attempts to connect to this AP, and eventually wpa_s will
2589 * blacklist the AP...
2590 */
2591 if (vif->type == NL80211_IFTYPE_STATION &&
2592 vif->bss_conf.beacon_int < 16) {
2593 IWL_ERR(mvm,
2594 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2595 sta->addr, vif->bss_conf.beacon_int);
2596 ret = -EINVAL;
2597 goto out_unlock;
2598 }
2599
2600 if (sta->tdls &&
2601 (vif->p2p ||
2602 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2603 IWL_MVM_TDLS_STA_COUNT ||
2604 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2605 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2606 ret = -EBUSY;
2607 goto out_unlock;
2608 }
2609
2610 ret = iwl_mvm_add_sta(mvm, vif, sta);
2611 if (sta->tdls && ret == 0) {
2612 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2613 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2614 NL80211_TDLS_SETUP);
2615 }
2616 } else if (old_state == IEEE80211_STA_NONE &&
2617 new_state == IEEE80211_STA_AUTH) {
2618 /*
2619 * EBS may be disabled due to previous failures reported by FW.
2620 * Reset EBS status here assuming environment has been changed.
2621 */
2622 mvm->last_ebs_successful = true;
2623 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2624 ret = 0;
2625 } else if (old_state == IEEE80211_STA_AUTH &&
2626 new_state == IEEE80211_STA_ASSOC) {
2627 if (vif->type == NL80211_IFTYPE_AP) {
2628 mvmvif->ap_assoc_sta_count++;
2629 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2630 }
2631
2632 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2633 true);
2634 ret = iwl_mvm_update_sta(mvm, vif, sta);
2635 } else if (old_state == IEEE80211_STA_ASSOC &&
2636 new_state == IEEE80211_STA_AUTHORIZED) {
2637
2638 /* we don't support TDLS during DCM */
2639 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2640 iwl_mvm_teardown_tdls_peers(mvm);
2641
2642 if (sta->tdls)
2643 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2644 NL80211_TDLS_ENABLE_LINK);
2645
2646 /* enable beacon filtering */
2647 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2648 ret = 0;
2649 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2650 new_state == IEEE80211_STA_ASSOC) {
2651 /* disable beacon filtering */
2652 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2653 ret = 0;
2654 } else if (old_state == IEEE80211_STA_ASSOC &&
2655 new_state == IEEE80211_STA_AUTH) {
2656 if (vif->type == NL80211_IFTYPE_AP) {
2657 mvmvif->ap_assoc_sta_count--;
2658 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2659 }
2660 ret = 0;
2661 } else if (old_state == IEEE80211_STA_AUTH &&
2662 new_state == IEEE80211_STA_NONE) {
2663 ret = 0;
2664 } else if (old_state == IEEE80211_STA_NONE &&
2665 new_state == IEEE80211_STA_NOTEXIST) {
2666 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2667 if (sta->tdls) {
2668 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2669 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2670 NL80211_TDLS_DISABLE_LINK);
2671 }
2672 } else {
2673 ret = -EIO;
2674 }
2675 out_unlock:
2676 mutex_unlock(&mvm->mutex);
2677
2678 if (sta->tdls && ret == 0) {
2679 if (old_state == IEEE80211_STA_NOTEXIST &&
2680 new_state == IEEE80211_STA_NONE)
2681 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2682 else if (old_state == IEEE80211_STA_NONE &&
2683 new_state == IEEE80211_STA_NOTEXIST)
2684 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2685 }
2686
2687 return ret;
2688 }
2689
2690 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2691 {
2692 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2693
2694 mvm->rts_threshold = value;
2695
2696 return 0;
2697 }
2698
2699 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2700 struct ieee80211_vif *vif,
2701 struct ieee80211_sta *sta, u32 changed)
2702 {
2703 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2704
2705 if (vif->type == NL80211_IFTYPE_STATION &&
2706 changed & IEEE80211_RC_NSS_CHANGED)
2707 iwl_mvm_sf_update(mvm, vif, false);
2708 }
2709
2710 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2711 struct ieee80211_vif *vif, u16 ac,
2712 const struct ieee80211_tx_queue_params *params)
2713 {
2714 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2715 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2716
2717 mvmvif->queue_params[ac] = *params;
2718
2719 /*
2720 * No need to update right away, we'll get BSS_CHANGED_QOS.
2721 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2722 */
2723 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2724 int ret;
2725
2726 mutex_lock(&mvm->mutex);
2727 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2728 mutex_unlock(&mvm->mutex);
2729 return ret;
2730 }
2731 return 0;
2732 }
2733
2734 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2735 struct ieee80211_vif *vif)
2736 {
2737 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2738 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
2739 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
2740
2741 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2742 return;
2743
2744 /*
2745 * iwl_mvm_protect_session() reads directly from the device
2746 * (the system time), so make sure it is available.
2747 */
2748 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2749 return;
2750
2751 mutex_lock(&mvm->mutex);
2752 /* Try really hard to protect the session and hear a beacon */
2753 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2754 mutex_unlock(&mvm->mutex);
2755
2756 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2757 }
2758
2759 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2760 struct ieee80211_vif *vif,
2761 struct cfg80211_sched_scan_request *req,
2762 struct ieee80211_scan_ies *ies)
2763 {
2764 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2765
2766 int ret;
2767
2768 mutex_lock(&mvm->mutex);
2769
2770 if (!vif->bss_conf.idle) {
2771 ret = -EBUSY;
2772 goto out;
2773 }
2774
2775 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2776
2777 out:
2778 mutex_unlock(&mvm->mutex);
2779 return ret;
2780 }
2781
2782 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2783 struct ieee80211_vif *vif)
2784 {
2785 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2786 int ret;
2787
2788 mutex_lock(&mvm->mutex);
2789
2790 /* Due to a race condition, it's possible that mac80211 asks
2791 * us to stop a sched_scan when it's already stopped. This
2792 * can happen, for instance, if we stopped the scan ourselves,
2793 * called ieee80211_sched_scan_stopped() and the userspace called
2794 * stop sched scan before ieee80211_sched_scan_stopped_work()
2795 * could run. To handle this, simply return if the scan is
2796 * not running.
2797 */
2798 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2799 mutex_unlock(&mvm->mutex);
2800 return 0;
2801 }
2802
2803 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2804 mutex_unlock(&mvm->mutex);
2805 iwl_mvm_wait_for_async_handlers(mvm);
2806
2807 return ret;
2808 }
2809
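/*
 * Install or remove a hardware crypto key. TKIP/CCMP/GCMP keys get IV
 * space generated by the device; for CCMP/GCMP pairwise keys with the
 * new RX API, per-queue PN tracking data is allocated so replay checks
 * can be done per RX queue. On HW restart the previous key index is
 * reused instead of requesting a new one.
 */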
2810 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2811 enum set_key_cmd cmd,
2812 struct ieee80211_vif *vif,
2813 struct ieee80211_sta *sta,
2814 struct ieee80211_key_conf *key)
2815 {
2816 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2817 struct iwl_mvm_sta *mvmsta;
2818 struct iwl_mvm_key_pn *ptk_pn;
2819 int keyidx = key->keyidx;
2820 int ret;
2821 u8 key_offset;
2822
2823 if (iwlwifi_mod_params.sw_crypto) {
2824 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2825 return -EOPNOTSUPP;
2826 }
2827
2828 switch (key->cipher) {
2829 case WLAN_CIPHER_SUITE_TKIP:
2830 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2831 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2832 break;
2833 case WLAN_CIPHER_SUITE_CCMP:
2834 case WLAN_CIPHER_SUITE_GCMP:
2835 case WLAN_CIPHER_SUITE_GCMP_256:
2836 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2837 break;
2838 case WLAN_CIPHER_SUITE_AES_CMAC:
2839 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2840 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2841 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2842 break;
2843 case WLAN_CIPHER_SUITE_WEP40:
2844 case WLAN_CIPHER_SUITE_WEP104:
2845 /* For non-client mode, only use WEP keys for TX as we probably
2846 * don't have a station yet anyway and would then have to keep
2847 * track of the keys, linking them to each of the clients/peers
2848 * as they appear. For now, don't do that, for performance WEP
2849 * offload doesn't really matter much, but we need it for some
2850 * other offload features in client mode.
2851 */
2852 if (vif->type != NL80211_IFTYPE_STATION)
2853 return 0;
2854 break;
2855 default:
2856 /* currently FW supports only one optional cipher scheme */
2857 if (hw->n_cipher_schemes &&
2858 hw->cipher_schemes->cipher == key->cipher)
2859 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2860 else
2861 return -EOPNOTSUPP;
2862 }
2863
2864 mutex_lock(&mvm->mutex);
2865
2866 switch (cmd) {
2867 case SET_KEY:
2868 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2869 vif->type == NL80211_IFTYPE_AP) && !sta) {
2870 /*
2871 * GTK on AP interface is a TX-only key, return 0;
2872 * on IBSS they're per-station and because we're lazy
2873 * we don't support them for RX, so do the same.
2874 * CMAC/GMAC in AP/IBSS modes must be done in software.
2875 */
2876 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
2877 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
2878 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2879 ret = -EOPNOTSUPP;
2880 else
2881 ret = 0;
2882 key->hw_key_idx = STA_KEY_IDX_INVALID;
2883 break;
2884 }
2885
2886 /* During FW restart, in order to restore the state as it was,
2887 * don't try to reprogram keys we previously failed for.
2888 */
2889 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2890 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2891 IWL_DEBUG_MAC80211(mvm,
2892 "skip invalid idx key programming during restart\n");
2893 ret = 0;
2894 break;
2895 }
2896
2897 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2898 sta && iwl_mvm_has_new_rx_api(mvm) &&
2899 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2900 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2901 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2902 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2903 struct ieee80211_key_seq seq;
2904 int tid, q;
2905
2906 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2907 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
2908 ptk_pn = kzalloc(sizeof(*ptk_pn) +
2909 mvm->trans->num_rx_queues *
2910 sizeof(ptk_pn->q[0]),
2911 GFP_KERNEL);
2912 if (!ptk_pn) {
2913 ret = -ENOMEM;
2914 break;
2915 }
2916
2917 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2918 ieee80211_get_key_rx_seq(key, tid, &seq);
2919 for (q = 0; q < mvm->trans->num_rx_queues; q++)
2920 memcpy(ptk_pn->q[q].pn[tid],
2921 seq.ccmp.pn,
2922 IEEE80211_CCMP_PN_LEN);
2923 }
2924
2925 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
2926 }
2927
2928 /* in HW restart reuse the index, otherwise request a new one */
2929 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2930 key_offset = key->hw_key_idx;
2931 else
2932 key_offset = STA_KEY_IDX_INVALID;
2933
2934 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2935 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
2936 if (ret) {
2937 IWL_WARN(mvm, "set key failed\n");
2938 /*
2939 * can't add key for RX, but we don't need it
2940 * in the device for TX so still return 0
2941 */
2942 key->hw_key_idx = STA_KEY_IDX_INVALID;
2943 ret = 0;
2944 }
2945
2946 break;
2947 case DISABLE_KEY:
2948 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2949 ret = 0;
2950 break;
2951 }
2952
2953 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
2954 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2955 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2956 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2957 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2958 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2959 ptk_pn = rcu_dereference_protected(
2960 mvmsta->ptk_pn[keyidx],
2961 lockdep_is_held(&mvm->mutex));
2962 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
2963 if (ptk_pn)
2964 kfree_rcu(ptk_pn, rcu_head);
2965 }
2966
2967 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2968 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2969 break;
2970 default:
2971 ret = -EINVAL;
2972 }
2973
2974 mutex_unlock(&mvm->mutex);
2975 return ret;
2976 }
2977
2978 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2979 struct ieee80211_vif *vif,
2980 struct ieee80211_key_conf *keyconf,
2981 struct ieee80211_sta *sta,
2982 u32 iv32, u16 *phase1key)
2983 {
2984 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2985
2986 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2987 return;
2988
2989 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2990 }
2991
2992
2993 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2994 struct iwl_rx_packet *pkt, void *data)
2995 {
2996 struct iwl_mvm *mvm =
2997 container_of(notif_wait, struct iwl_mvm, notif_wait);
2998 struct iwl_hs20_roc_res *resp;
2999 int resp_len = iwl_rx_packet_payload_len(pkt);
3000 struct iwl_mvm_time_event_data *te_data = data;
3001
3002 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3003 return true;
3004
3005 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3006 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3007 return true;
3008 }
3009
3010 resp = (void *)pkt->data;
3011
3012 IWL_DEBUG_TE(mvm,
3013 "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3014 resp->status, resp->event_unique_id);
3015
3016 te_data->uid = le32_to_cpu(resp->event_unique_id);
3017 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3018 te_data->uid);
3019
3020 spin_lock_bh(&mvm->time_event_lock);
3021 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3022 spin_unlock_bh(&mvm->time_event_lock);
3023
3024 return true;
3025 }
3026
3027 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
3028 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
3029 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
3030 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
3031 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
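/*
 * Request an AUX ROC (remain-on-channel) time event via HOT_SPOT_CMD.
 * When associated, the delay and duration are adjusted around the DTIM
 * interval so the firmware can start the event right after a DTIM. A
 * notification wait is used to pick up the time event UID from the
 * command response inside the RX path.
 */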
3032 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3033 struct ieee80211_channel *channel,
3034 struct ieee80211_vif *vif,
3035 int duration)
3036 {
3037 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3038 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3039 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3040 static const u16 time_event_response[] = { HOT_SPOT_CMD };
3041 struct iwl_notification_wait wait_time_event;
3042 u32 dtim_interval = vif->bss_conf.dtim_period *
3043 vif->bss_conf.beacon_int;
3044 u32 req_dur, delay;
3045 struct iwl_hs20_roc_req aux_roc_req = {
3046 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3047 .id_and_color =
3048 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3049 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3050 /* Set the channel info data */
3051 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
3052 PHY_BAND_24 : PHY_BAND_5,
3053 .channel_info.channel = channel->hw_value,
3054 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3055 /* Set the time and duration */
3056 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3057 };
3058
3059 delay = AUX_ROC_MIN_DELAY;
3060 req_dur = MSEC_TO_TU(duration);
3061
3062 /*
3063 * If we are associated, we want the delay time to be at least one
3064 * dtim interval so that the FW can wait until after the DTIM and
3065 * then start the time event; this will potentially allow us to
3066 * remain off-channel for the max duration.
3067 * Since we want to use almost a whole dtim interval we would also
3068 * like the delay to be for 2-3 dtim intervals, in case there are
3069 * other time events with higher priority.
3070 */
3071 if (vif->bss_conf.assoc) {
3072 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
3073 /* We cannot remain off-channel longer than the DTIM interval */
3074 if (dtim_interval <= req_dur) {
3075 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
3076 if (req_dur <= AUX_ROC_MIN_DURATION)
3077 req_dur = dtim_interval -
3078 AUX_ROC_MIN_SAFETY_BUFFER;
3079 }
3080 }
3081
3082 aux_roc_req.duration = cpu_to_le32(req_dur);
3083 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
3084
3085 IWL_DEBUG_TE(mvm,
3086 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
3087 channel->hw_value, req_dur, duration, delay,
3088 dtim_interval);
3089 /* Set the node address */
3090 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3091
3092 lockdep_assert_held(&mvm->mutex);
3093
3094 spin_lock_bh(&mvm->time_event_lock);
3095
3096 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3097 spin_unlock_bh(&mvm->time_event_lock);
3098 return -EIO;
3099 }
3100
3101 te_data->vif = vif;
3102 te_data->duration = duration;
3103 te_data->id = HOT_SPOT_CMD;
3104
3105 spin_unlock_bh(&mvm->time_event_lock);
3106
3107 /*
3108 * Use a notification wait, which really just processes the
3109 * command response and doesn't wait for anything, in order
3110 * to be able to process the response and get the UID inside
3111 * the RX path. Using CMD_WANT_SKB doesn't work because it
3112 * stores the buffer and then wakes up this thread, by which
3113 * time another notification (that the time event started)
3114 * might already be processed unsuccessfully.
3115 */
3116 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3117 time_event_response,
3118 ARRAY_SIZE(time_event_response),
3119 iwl_mvm_rx_aux_roc, te_data);
3120
3121 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3122 &aux_roc_req);
3123
3124 if (res) {
3125 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3126 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3127 goto out_clear_te;
3128 }
3129
3130 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3131 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3132 /* should never fail */
3133 WARN_ON_ONCE(res);
3134
3135 if (res) {
3136 out_clear_te:
3137 spin_lock_bh(&mvm->time_event_lock);
3138 iwl_mvm_te_clear_data(mvm, te_data);
3139 spin_unlock_bh(&mvm->time_event_lock);
3140 }
3141
3142 return res;
3143 }
3144
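/*
 * remain_on_channel handler. Station interfaces use the AUX ROC
 * (hotspot) firmware flow when supported; a P2P Device is instead
 * rebound to a PHY context matching the requested channel (reusing or
 * reconfiguring one as needed) before the time event is scheduled.
 */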
3145 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3146 struct ieee80211_vif *vif,
3147 struct ieee80211_channel *channel,
3148 int duration,
3149 enum ieee80211_roc_type type)
3150 {
3151 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3152 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3153 struct cfg80211_chan_def chandef;
3154 struct iwl_mvm_phy_ctxt *phy_ctxt;
3155 int ret, i;
3156
3157 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3158 duration, type);
3159
3160 flush_work(&mvm->roc_done_wk);
3161
3162 mutex_lock(&mvm->mutex);
3163
3164 switch (vif->type) {
3165 case NL80211_IFTYPE_STATION:
3166 if (fw_has_capa(&mvm->fw->ucode_capa,
3167 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3168 /* Use aux roc framework (HS20) */
3169 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3170 vif, duration);
3171 goto out_unlock;
3172 }
3173 IWL_ERR(mvm, "hotspot not supported\n");
3174 ret = -EINVAL;
3175 goto out_unlock;
3176 case NL80211_IFTYPE_P2P_DEVICE:
3177 /* handle below */
3178 break;
3179 default:
3180 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3181 ret = -EINVAL;
3182 goto out_unlock;
3183 }
3184
3185 for (i = 0; i < NUM_PHY_CTX; i++) {
3186 phy_ctxt = &mvm->phy_ctxts[i];
3187 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3188 continue;
3189
3190 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3191 /*
3192 * Unbind the P2P_DEVICE from the current PHY context,
3193 * and if the PHY context is not used remove it.
3194 */
3195 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3196 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3197 goto out_unlock;
3198
3199 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3200
3201 /* Bind the P2P_DEVICE to the current PHY Context */
3202 mvmvif->phy_ctxt = phy_ctxt;
3203
3204 ret = iwl_mvm_binding_add_vif(mvm, vif);
3205 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3206 goto out_unlock;
3207
3208 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3209 goto schedule_time_event;
3210 }
3211 }
3212
3213 /* Need to update the PHY context only if the ROC channel changed */
3214 if (channel == mvmvif->phy_ctxt->channel)
3215 goto schedule_time_event;
3216
3217 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3218
3219 /*
3220 * Change the PHY context configuration as it is currently referenced
3221 * only by the P2P Device MAC
3222 */
3223 if (mvmvif->phy_ctxt->ref == 1) {
3224 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3225 &chandef, 1, 1);
3226 if (ret)
3227 goto out_unlock;
3228 } else {
3229 /*
3230 * The PHY context is shared with other MACs. Need to remove the
3231 * P2P Device from the binding, allocate a new PHY context and
3232 * create a new binding.
3233 */
3234 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3235 if (!phy_ctxt) {
3236 ret = -ENOSPC;
3237 goto out_unlock;
3238 }
3239
3240 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3241 1, 1);
3242 if (ret) {
3243 IWL_ERR(mvm, "Failed to change PHY context\n");
3244 goto out_unlock;
3245 }
3246
3247 /* Unbind the P2P_DEVICE from the current PHY context */
3248 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3249 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3250 goto out_unlock;
3251
3252 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3253
3254 /* Bind the P2P_DEVICE to the new allocated PHY context */
3255 mvmvif->phy_ctxt = phy_ctxt;
3256
3257 ret = iwl_mvm_binding_add_vif(mvm, vif);
3258 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3259 goto out_unlock;
3260
3261 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3262 }
3263
3264 schedule_time_event:
3265 /* Schedule the time events */
3266 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3267
3268 out_unlock:
3269 mutex_unlock(&mvm->mutex);
3270 IWL_DEBUG_MAC80211(mvm, "leave\n");
3271 return ret;
3272 }
3273
3274 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3275 {
3276 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3277
3278 IWL_DEBUG_MAC80211(mvm, "enter\n");
3279
3280 mutex_lock(&mvm->mutex);
3281 iwl_mvm_stop_roc(mvm);
3282 mutex_unlock(&mvm->mutex);
3283
3284 IWL_DEBUG_MAC80211(mvm, "leave\n");
3285 return 0;
3286 }
3287
3288 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3289 struct ieee80211_chanctx_conf *ctx)
3290 {
3291 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3292 struct iwl_mvm_phy_ctxt *phy_ctxt;
3293 int ret;
3294
3295 lockdep_assert_held(&mvm->mutex);
3296
3297 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3298
3299 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3300 if (!phy_ctxt) {
3301 ret = -ENOSPC;
3302 goto out;
3303 }
3304
3305 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3306 ctx->rx_chains_static,
3307 ctx->rx_chains_dynamic);
3308 if (ret) {
3309 IWL_ERR(mvm, "Failed to add PHY context\n");
3310 goto out;
3311 }
3312
3313 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3314 *phy_ctxt_id = phy_ctxt->id;
3315 out:
3316 return ret;
3317 }
3318
3319 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3320 struct ieee80211_chanctx_conf *ctx)
3321 {
3322 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3323 int ret;
3324
3325 mutex_lock(&mvm->mutex);
3326 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3327 mutex_unlock(&mvm->mutex);
3328
3329 return ret;
3330 }
3331
3332 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3333 struct ieee80211_chanctx_conf *ctx)
3334 {
3335 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3336 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3337
3338 lockdep_assert_held(&mvm->mutex);
3339
3340 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3341 }
3342
3343 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3344 struct ieee80211_chanctx_conf *ctx)
3345 {
3346 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3347
3348 mutex_lock(&mvm->mutex);
3349 __iwl_mvm_remove_chanctx(mvm, ctx);
3350 mutex_unlock(&mvm->mutex);
3351 }
3352
3353 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3354 struct ieee80211_chanctx_conf *ctx,
3355 u32 changed)
3356 {
3357 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3358 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3359 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3360
3361 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3362 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3363 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3364 IEEE80211_CHANCTX_CHANGE_RADAR |
3365 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3366 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3367 phy_ctxt->ref, changed))
3368 return;
3369
3370 mutex_lock(&mvm->mutex);
3371 iwl_mvm_bt_coex_vif_change(mvm);
3372 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3373 ctx->rx_chains_static,
3374 ctx->rx_chains_dynamic);
3375 mutex_unlock(&mvm->mutex);
3376 }
3377
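/*
 * Bind a vif to the PHY context backing the given channel context.
 * AP/IBSS binding is done in the start_ap/start_ibss flow unless we
 * are switching channel contexts (CSA); monitor interfaces additionally
 * get quota and a sniffer station here, and station interfaces get
 * session protection when switching so the first beacon on the new
 * channel is heard.
 */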
3378 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3379 struct ieee80211_vif *vif,
3380 struct ieee80211_chanctx_conf *ctx,
3381 bool switching_chanctx)
3382 {
3383 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3384 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3385 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3386 int ret;
3387
3388 lockdep_assert_held(&mvm->mutex);
3389
3390 mvmvif->phy_ctxt = phy_ctxt;
3391
3392 switch (vif->type) {
3393 case NL80211_IFTYPE_AP:
3394 /* only needed if we're switching chanctx (i.e. during CSA) */
3395 if (switching_chanctx) {
3396 mvmvif->ap_ibss_active = true;
3397 break;
3398 }
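		/* fall through */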
3399 case NL80211_IFTYPE_ADHOC:
3400 /*
3401 * The AP binding flow is handled as part of the start_ap flow
3402 * (in bss_info_changed), similarly for IBSS.
3403 */
3404 ret = 0;
3405 goto out;
3406 case NL80211_IFTYPE_STATION:
3407 break;
3408 case NL80211_IFTYPE_MONITOR:
3409 /* always disable PS when a monitor interface is active */
3410 mvmvif->ps_disabled = true;
3411 break;
3412 default:
3413 ret = -EINVAL;
3414 goto out;
3415 }
3416
3417 ret = iwl_mvm_binding_add_vif(mvm, vif);
3418 if (ret)
3419 goto out;
3420
3421 /*
3422 * Power state must be updated before quotas,
3423 * otherwise fw will complain.
3424 */
3425 iwl_mvm_power_update_mac(mvm);
3426
3427 /* Setting the quota at this stage is only required for monitor
3428 * interfaces. For the other types, the bss_info changed flow
3429 * will handle quota settings.
3430 */
3431 if (vif->type == NL80211_IFTYPE_MONITOR) {
3432 mvmvif->monitor_active = true;
3433 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3434 if (ret)
3435 goto out_remove_binding;
3436
3437 ret = iwl_mvm_add_snif_sta(mvm, vif);
3438 if (ret)
3439 goto out_remove_binding;
3440
3441 }
3442
3443 /* Handle binding during CSA */
3444 if (vif->type == NL80211_IFTYPE_AP) {
3445 iwl_mvm_update_quotas(mvm, false, NULL);
3446 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3447 }
3448
3449 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3450 u32 duration = 2 * vif->bss_conf.beacon_int;
3451
3452 /* iwl_mvm_protect_session() reads directly from the
3453 * device (the system time), so make sure it is
3454 * available.
3455 */
3456 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3457 if (ret)
3458 goto out_remove_binding;
3459
3460 /* Protect the session to make sure we hear the first
3461 * beacon on the new channel.
3462 */
3463 iwl_mvm_protect_session(mvm, vif, duration, duration,
3464 vif->bss_conf.beacon_int / 2,
3465 true);
3466
3467 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3468
3469 iwl_mvm_update_quotas(mvm, false, NULL);
3470 }
3471
3472 goto out;
3473
3474 out_remove_binding:
3475 iwl_mvm_binding_remove_vif(mvm, vif);
3476 iwl_mvm_power_update_mac(mvm);
3477 out:
3478 if (ret)
3479 mvmvif->phy_ctxt = NULL;
3480 return ret;
3481 }

3482 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3483 struct ieee80211_vif *vif,
3484 struct ieee80211_chanctx_conf *ctx)
3485 {
3486 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3487 int ret;
3488
3489 mutex_lock(&mvm->mutex);
3490 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3491 mutex_unlock(&mvm->mutex);
3492
3493 return ret;
3494 }
3495
3496 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3497 struct ieee80211_vif *vif,
3498 struct ieee80211_chanctx_conf *ctx,
3499 bool switching_chanctx)
3500 {
3501 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3502 struct ieee80211_vif *disabled_vif = NULL;
3503
3504 lockdep_assert_held(&mvm->mutex);
3505
3506 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3507
3508 switch (vif->type) {
3509 case NL80211_IFTYPE_ADHOC:
3510 goto out;
3511 case NL80211_IFTYPE_MONITOR:
3512 mvmvif->monitor_active = false;
3513 mvmvif->ps_disabled = false;
3514 iwl_mvm_rm_snif_sta(mvm, vif);
3515 break;
3516 case NL80211_IFTYPE_AP:
3517 /* This part is triggered only during CSA */
3518 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3519 goto out;
3520
3521 mvmvif->csa_countdown = false;
3522
3523 /* Set CS bit on all the stations */
3524 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3525
3526 /* Save blocked iface, the timeout is set on the next beacon */
3527 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3528
3529 mvmvif->ap_ibss_active = false;
3530 break;
3531 case NL80211_IFTYPE_STATION:
3532 if (!switching_chanctx)
3533 break;
3534
3535 disabled_vif = vif;
3536
3537 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3538 break;
3539 default:
3540 break;
3541 }
3542
3543 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3544 iwl_mvm_binding_remove_vif(mvm, vif);
3545
3546 out:
3547 mvmvif->phy_ctxt = NULL;
3548 iwl_mvm_power_update_mac(mvm);
3549 }
3550
3551 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3552 struct ieee80211_vif *vif,
3553 struct ieee80211_chanctx_conf *ctx)
3554 {
3555 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3556
3557 mutex_lock(&mvm->mutex);
3558 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3559 mutex_unlock(&mvm->mutex);
3560 }
3561
3562 static int
3563 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3564 struct ieee80211_vif_chanctx_switch *vifs)
3565 {
3566 int ret;
3567
3568 mutex_lock(&mvm->mutex);
3569 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3570 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3571
3572 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3573 if (ret) {
3574 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3575 goto out_reassign;
3576 }
3577
3578 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3579 true);
3580 if (ret) {
3581 IWL_ERR(mvm,
3582 "failed to assign new_ctx during channel switch\n");
3583 goto out_remove;
3584 }
3585
3586 /* we don't support TDLS during DCM - can be caused by channel switch */
3587 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3588 iwl_mvm_teardown_tdls_peers(mvm);
3589
3590 goto out;
3591
3592 out_remove:
3593 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3594
3595 out_reassign:
3596 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3597 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3598 goto out_restart;
3599 }
3600
3601 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3602 true)) {
3603 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3604 goto out_restart;
3605 }
3606
3607 goto out;
3608
3609 out_restart:
3610 /* things keep failing, better restart the hw */
3611 iwl_mvm_nic_restart(mvm, false);
3612
3613 out:
3614 mutex_unlock(&mvm->mutex);
3615
3616 return ret;
3617 }
3618
3619 static int
3620 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3621 struct ieee80211_vif_chanctx_switch *vifs)
3622 {
3623 int ret;
3624
3625 mutex_lock(&mvm->mutex);
3626 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3627
3628 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3629 true);
3630 if (ret) {
3631 IWL_ERR(mvm,
3632 "failed to assign new_ctx during channel switch\n");
3633 goto out_reassign;
3634 }
3635
3636 goto out;
3637
3638 out_reassign:
3639 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3640 true)) {
3641 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3642 goto out_restart;
3643 }
3644
3645 goto out;
3646
3647 out_restart:
3648 /* things keep failing, better restart the hw */
3649 iwl_mvm_nic_restart(mvm, false);
3650
3651 out:
3652 mutex_unlock(&mvm->mutex);
3653
3654 return ret;
3655 }
3656
3657 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3658 struct ieee80211_vif_chanctx_switch *vifs,
3659 int n_vifs,
3660 enum ieee80211_chanctx_switch_mode mode)
3661 {
3662 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3663 int ret;
3664
3665 /* we only support a single vif right now */
3666 if (n_vifs > 1)
3667 return -EOPNOTSUPP;
3668
3669 switch (mode) {
3670 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3671 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3672 break;
3673 case CHANCTX_SWMODE_REASSIGN_VIF:
3674 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3675 break;
3676 default:
3677 ret = -EOPNOTSUPP;
3678 break;
3679 }
3680
3681 return ret;
3682 }
3683
3684 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3685 struct ieee80211_sta *sta,
3686 bool set)
3687 {
3688 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3689 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3690
3691 if (!mvm_sta || !mvm_sta->vif) {
3692 IWL_ERR(mvm, "Station is not associated with a vif\n");
3693 return -EINVAL;
3694 }
3695
3696 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3697 }
3698
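/*
 * nl80211 testmode support: two debug commands are handled below -
 * SET_NOA programs a NoA duration (which must be shorter than the
 * beacon interval) on a beaconing P2P GO vif, and SET_BEACON_FILTER
 * toggles beacon filtering on an associated client vif.
 */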
3699 #ifdef CONFIG_NL80211_TESTMODE
3700 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3701 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3702 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3703 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3704 };
3705
3706 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3707 struct ieee80211_vif *vif,
3708 void *data, int len)
3709 {
3710 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3711 int err;
3712 u32 noa_duration;
3713
3714 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
3715 NULL);
3716 if (err)
3717 return err;
3718
3719 if (!tb[IWL_MVM_TM_ATTR_CMD])
3720 return -EINVAL;
3721
3722 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3723 case IWL_MVM_TM_CMD_SET_NOA:
3724 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3725 !vif->bss_conf.enable_beacon ||
3726 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3727 return -EINVAL;
3728
3729 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3730 if (noa_duration >= vif->bss_conf.beacon_int)
3731 return -EINVAL;
3732
3733 mvm->noa_duration = noa_duration;
3734 mvm->noa_vif = vif;
3735
3736 return iwl_mvm_update_quotas(mvm, false, NULL);
3737 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3738 /* must be an associated client vif - whether it's authorized doesn't matter */
3739 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3740 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3741 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3742 return -EINVAL;
3743
3744 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3745 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3746 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3747 }
3748
3749 return -EOPNOTSUPP;
3750 }
3751
3752 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3753 struct ieee80211_vif *vif,
3754 void *data, int len)
3755 {
3756 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3757 int err;
3758
3759 mutex_lock(&mvm->mutex);
3760 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3761 mutex_unlock(&mvm->mutex);
3762
3763 return err;
3764 }
3765 #endif
3766
3767 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3768 struct ieee80211_vif *vif,
3769 struct ieee80211_channel_switch *chsw)
3770 {
3771 /* By implementing this operation, we prevent mac80211 from
3772 * starting its own channel switch timer, so that we can call
3773 * ieee80211_chswitch_done() ourselves at the right time
3774 * (which is when the absence time event starts).
3775 */
3776
3777 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3778 "dummy channel switch op\n");
3779 }
3780
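/*
 * Prepare for a channel switch announced by mac80211.  For an AP/GO we
 * only record the target frequency and make sure no other CSA is in
 * flight; for a client we schedule an absence time event shortly before
 * the switch and disable beacon filtering if it was enabled.  Power
 * save is disabled in both cases until post_channel_switch runs.
 */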
3781 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3782 struct ieee80211_vif *vif,
3783 struct ieee80211_channel_switch *chsw)
3784 {
3785 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3786 struct ieee80211_vif *csa_vif;
3787 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3788 u32 apply_time;
3789 int ret;
3790
3791 mutex_lock(&mvm->mutex);
3792
3793 mvmvif->csa_failed = false;
3794
3795 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3796 chsw->chandef.center_freq1);
3797
3798 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3799
3800 switch (vif->type) {
3801 case NL80211_IFTYPE_AP:
3802 csa_vif =
3803 rcu_dereference_protected(mvm->csa_vif,
3804 lockdep_is_held(&mvm->mutex));
3805 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3806 "Another CSA is already in progress")) {
3807 ret = -EBUSY;
3808 goto out_unlock;
3809 }
3810
3811 /* we haven't unblocked tx yet - prevent a new CS meanwhile */
3812 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
3813 lockdep_is_held(&mvm->mutex))) {
3814 ret = -EBUSY;
3815 goto out_unlock;
3816 }
3817
3818 rcu_assign_pointer(mvm->csa_vif, vif);
3819
3820 if (WARN_ONCE(mvmvif->csa_countdown,
3821 "Previous CSA countdown didn't complete")) {
3822 ret = -EBUSY;
3823 goto out_unlock;
3824 }
3825
3826 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
3827
3828 break;
3829 case NL80211_IFTYPE_STATION:
3830 if (mvmvif->lqm_active)
3831 iwl_mvm_send_lqm_cmd(vif,
3832 LQM_CMD_OPERATION_STOP_MEASUREMENT,
3833 0, 0);
3834
3835 /* Schedule the time event to start a bit before beacon 1,
3836 * to make sure we're already on the new channel when the
3837 * GO/AP shows up there.
3838 */
3839 apply_time = chsw->device_timestamp +
3840 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3841 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
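/* For illustration (hypothetical numbers): with a 100 TU beacon
 * interval, chsw->count == 3 and a client switch time of 10 TU,
 * this is device_timestamp + (100 * 2 - 10) * 1024 usec, i.e.
 * roughly 195 ms after the device timestamp, just ahead of
 * beacon 1 (the last beacon before the switch).  The * 1024
 * converts TU to usec to match the device timestamp units.
 */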
3842
3843 if (chsw->block_tx)
3844 iwl_mvm_csa_client_absent(mvm, vif);
3845
3846 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3847 apply_time);
3848 if (mvmvif->bf_data.bf_enabled) {
3849 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3850 if (ret)
3851 goto out_unlock;
3852 }
3853
3854 break;
3855 default:
3856 break;
3857 }
3858
3859 mvmvif->ps_disabled = true;
3860
3861 ret = iwl_mvm_power_update_ps(mvm);
3862 if (ret)
3863 goto out_unlock;
3864
3865 /* we won't be on this channel any longer */
3866 iwl_mvm_teardown_tdls_peers(mvm);
3867
3868 out_unlock:
3869 mutex_unlock(&mvm->mutex);
3870
3871 return ret;
3872 }
3873
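/*
 * Called by mac80211 once the channel switch has completed: re-enable
 * tx to the AP station, restore beacon filtering and power save, and
 * stop the session protection that covered the switch.
 */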
3874 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3875 struct ieee80211_vif *vif)
3876 {
3877 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3878 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3879 int ret;
3880
3881 mutex_lock(&mvm->mutex);
3882
3883 if (mvmvif->csa_failed) {
3884 mvmvif->csa_failed = false;
3885 ret = -EIO;
3886 goto out_unlock;
3887 }
3888
3889 if (vif->type == NL80211_IFTYPE_STATION) {
3890 struct iwl_mvm_sta *mvmsta;
3891
3892 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3893 mvmvif->ap_sta_id);
3894
3895 if (WARN_ON(!mvmsta)) {
3896 ret = -EIO;
3897 goto out_unlock;
3898 }
3899
3900 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3901
3902 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3903
3904 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3905 if (ret)
3906 goto out_unlock;
3907
3908 iwl_mvm_stop_session_protection(mvm, vif);
3909 }
3910
3911 mvmvif->ps_disabled = false;
3912
3913 ret = iwl_mvm_power_update_ps(mvm);
3914
3915 out_unlock:
3916 mutex_unlock(&mvm->mutex);
3917
3918 return ret;
3919 }
3920
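/*
 * Flush tx for a client vif: collect the tfd queue masks of the AP
 * station and any TDLS peers on that vif, then either flush them in
 * the firmware (drop) or wait for the queues to drain.
 */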
3921 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3922 struct ieee80211_vif *vif, u32 queues, bool drop)
3923 {
3924 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3925 struct iwl_mvm_vif *mvmvif;
3926 struct iwl_mvm_sta *mvmsta;
3927 struct ieee80211_sta *sta;
3928 int i;
3929 u32 msk = 0;
3930
3931 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3932 return;
3933
3934 /* Make sure we're done with the deferred traffic before flushing */
3935 if (iwl_mvm_is_dqa_supported(mvm))
3936 flush_work(&mvm->add_stream_wk);
3937
3938 mutex_lock(&mvm->mutex);
3939 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3940
3941 /* flush the AP station and all TDLS peers */
3942 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3943 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3944 lockdep_is_held(&mvm->mutex));
3945 if (IS_ERR_OR_NULL(sta))
3946 continue;
3947
3948 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3949 if (mvmsta->vif != vif)
3950 continue;
3951
3952 /* make sure only TDLS peers or the AP are flushed */
3953 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3954
3955 msk |= mvmsta->tfd_queue_msk;
3956 }
3957
3958 if (drop) {
3959 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3960 IWL_ERR(mvm, "flush request failed\n");
3961 mutex_unlock(&mvm->mutex);
3962 } else {
3963 mutex_unlock(&mvm->mutex);
3964
3965 /* this can take a while, and we may need/want other operations
3966 * to succeed while doing this, so do it without the mutex held
3967 */
3968 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3969 }
3970 }
3971
3972 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3973 struct survey_info *survey)
3974 {
3975 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3976 int ret;
3977
3978 memset(survey, 0, sizeof(*survey));
3979
3980 /* only support global statistics right now */
3981 if (idx != 0)
3982 return -ENOENT;
3983
3984 if (!fw_has_capa(&mvm->fw->ucode_capa,
3985 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3986 return -ENOENT;
3987
3988 mutex_lock(&mvm->mutex);
3989
3990 if (mvm->ucode_loaded) {
3991 ret = iwl_mvm_request_statistics(mvm, false);
3992 if (ret)
3993 goto out;
3994 }
3995
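/* the firmware reports radio on/rx/tx/scan times in usec, while
 * nl80211 expects survey times in msec - hence the do_div() by
 * USEC_PER_MSEC below.  accu_radio_stats presumably carries the
 * totals accumulated before the last statistics reset, radio_stats
 * the counters since then.
 */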
3996 survey->filled = SURVEY_INFO_TIME |
3997 SURVEY_INFO_TIME_RX |
3998 SURVEY_INFO_TIME_TX |
3999 SURVEY_INFO_TIME_SCAN;
4000 survey->time = mvm->accu_radio_stats.on_time_rf +
4001 mvm->radio_stats.on_time_rf;
4002 do_div(survey->time, USEC_PER_MSEC);
4003
4004 survey->time_rx = mvm->accu_radio_stats.rx_time +
4005 mvm->radio_stats.rx_time;
4006 do_div(survey->time_rx, USEC_PER_MSEC);
4007
4008 survey->time_tx = mvm->accu_radio_stats.tx_time +
4009 mvm->radio_stats.tx_time;
4010 do_div(survey->time_tx, USEC_PER_MSEC);
4011
4012 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4013 mvm->radio_stats.on_time_scan;
4014 do_div(survey->time_scan, USEC_PER_MSEC);
4015
4016 ret = 0;
4017 out:
4018 mutex_unlock(&mvm->mutex);
4019 return ret;
4020 }
4021
4022 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4023 struct ieee80211_vif *vif,
4024 struct ieee80211_sta *sta,
4025 struct station_info *sinfo)
4026 {
4027 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4028 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4029 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4030
4031 if (mvmsta->avg_energy) {
4032 sinfo->signal_avg = mvmsta->avg_energy;
4033 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
4034 }
4035
4036 if (!fw_has_capa(&mvm->fw->ucode_capa,
4037 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4038 return;
4039
4040 /* if beacon filtering isn't on, mac80211 does it anyway */
4041 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4042 return;
4043
4044 if (!vif->bss_conf.assoc)
4045 return;
4046
4047 mutex_lock(&mvm->mutex);
4048
4049 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4050 goto unlock;
4051
4052 if (iwl_mvm_request_statistics(mvm, false))
4053 goto unlock;
4054
4055 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4056 mvmvif->beacon_stats.accu_num_beacons;
4057 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4058 if (mvmvif->beacon_stats.avg_signal) {
4059 /* firmware only reports a value after RXing a few beacons */
4060 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4061 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4062 }
4063 unlock:
4064 mutex_unlock(&mvm->mutex);
4065 }
4066
4067 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4068 struct ieee80211_vif *vif,
4069 const struct ieee80211_event *event)
4070 {
4071 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
4072 do { \
4073 if ((_cnt) && --(_cnt)) \
4074 break; \
4075 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4076 } while (0)
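/* each stop counter counts down on matching MLME events and the debug
 * data is only collected once it reaches zero; a counter that is
 * already zero collects on every matching event.
 */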
4077
4078 struct iwl_fw_dbg_trigger_tlv *trig;
4079 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4080
4081 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4082 return;
4083
4084 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4085 trig_mlme = (void *)trig->data;
4086 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4087 return;
4088
4089 if (event->u.mlme.data == ASSOC_EVENT) {
4090 if (event->u.mlme.status == MLME_DENIED)
4091 CHECK_MLME_TRIGGER(mvm, trig, buf,
4092 trig_mlme->stop_assoc_denied,
4093 "DENIED ASSOC: reason %d",
4094 event->u.mlme.reason);
4095 else if (event->u.mlme.status == MLME_TIMEOUT)
4096 CHECK_MLME_TRIGGER(mvm, trig, buf,
4097 trig_mlme->stop_assoc_timeout,
4098 "ASSOC TIMEOUT");
4099 } else if (event->u.mlme.data == AUTH_EVENT) {
4100 if (event->u.mlme.status == MLME_DENIED)
4101 CHECK_MLME_TRIGGER(mvm, trig, buf,
4102 trig_mlme->stop_auth_denied,
4103 "DENIED AUTH: reason %d",
4104 event->u.mlme.reason);
4105 else if (event->u.mlme.status == MLME_TIMEOUT)
4106 CHECK_MLME_TRIGGER(mvm, trig, buf,
4107 trig_mlme->stop_auth_timeout,
4108 "AUTH TIMEOUT");
4109 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4110 CHECK_MLME_TRIGGER(mvm, trig, buf,
4111 trig_mlme->stop_rx_deauth,
4112 "DEAUTH RX %d", event->u.mlme.reason);
4113 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4114 CHECK_MLME_TRIGGER(mvm, trig, buf,
4115 trig_mlme->stop_tx_deauth,
4116 "DEAUTH TX %d", event->u.mlme.reason);
4117 }
4118 #undef CHECK_MLME_TRIGGER
4119 }
4120
4121 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4122 struct ieee80211_vif *vif,
4123 const struct ieee80211_event *event)
4124 {
4125 struct iwl_fw_dbg_trigger_tlv *trig;
4126 struct iwl_fw_dbg_trigger_ba *ba_trig;
4127
4128 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4129 return;
4130
4131 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4132 ba_trig = (void *)trig->data;
4133 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4134 return;
4135
4136 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4137 return;
4138
4139 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4140 "BAR received from %pM, tid %d, ssn %d",
4141 event->u.ba.sta->addr, event->u.ba.tid,
4142 event->u.ba.ssn);
4143 }
4144
4145 static void
4146 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4147 struct ieee80211_vif *vif,
4148 const struct ieee80211_event *event)
4149 {
4150 struct iwl_fw_dbg_trigger_tlv *trig;
4151 struct iwl_fw_dbg_trigger_ba *ba_trig;
4152
4153 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4154 return;
4155
4156 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4157 ba_trig = (void *)trig->data;
4158 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4159 return;
4160
4161 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4162 return;
4163
4164 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4165 "Frame from %pM timed out, tid %d",
4166 event->u.ba.sta->addr, event->u.ba.tid);
4167 }
4168
4169 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4170 struct ieee80211_vif *vif,
4171 const struct ieee80211_event *event)
4172 {
4173 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4174
4175 switch (event->type) {
4176 case MLME_EVENT:
4177 iwl_mvm_event_mlme_callback(mvm, vif, event);
4178 break;
4179 case BAR_RX_EVENT:
4180 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4181 break;
4182 case BA_FRAME_TIMEOUT:
4183 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4184 break;
4185 default:
4186 break;
4187 }
4188 }
4189
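/*
 * Send an internal notification to all RX queues.  When notif->sync is
 * set, wait up to one second until every queue has processed it; the
 * cookie identifies this particular sync request to the RX path.
 */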
4190 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4191 struct iwl_mvm_internal_rxq_notif *notif,
4192 u32 size)
4193 {
4194 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4195 int ret;
4196
4197 lockdep_assert_held(&mvm->mutex);
4198
4199 if (!iwl_mvm_has_new_rx_api(mvm))
4200 return;
4201
4202 notif->cookie = mvm->queue_sync_cookie;
4203
4204 if (notif->sync)
4205 atomic_set(&mvm->queue_sync_counter,
4206 mvm->trans->num_rx_queues);
4207
4208 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4209 if (ret) {
4210 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4211 goto out;
4212 }
4213
4214 if (notif->sync)
4215 ret = wait_event_timeout(mvm->rx_sync_waitq,
4216 atomic_read(&mvm->queue_sync_counter) == 0,
4217 HZ);
4218 WARN_ON_ONCE(!ret);
4219
4220 out:
4221 atomic_set(&mvm->queue_sync_counter, 0);
4222 mvm->queue_sync_cookie++;
4223 }
4224
4225 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4226 {
4227 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4228 struct iwl_mvm_internal_rxq_notif data = {
4229 .type = IWL_MVM_RXQ_EMPTY,
4230 .sync = 1,
4231 };
4232
4233 mutex_lock(&mvm->mutex);
4234 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4235 mutex_unlock(&mvm->mutex);
4236 }
4237
4238 const struct ieee80211_ops iwl_mvm_hw_ops = {
4239 .tx = iwl_mvm_mac_tx,
4240 .ampdu_action = iwl_mvm_mac_ampdu_action,
4241 .start = iwl_mvm_mac_start,
4242 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4243 .stop = iwl_mvm_mac_stop,
4244 .add_interface = iwl_mvm_mac_add_interface,
4245 .remove_interface = iwl_mvm_mac_remove_interface,
4246 .config = iwl_mvm_mac_config,
4247 .prepare_multicast = iwl_mvm_prepare_multicast,
4248 .configure_filter = iwl_mvm_configure_filter,
4249 .config_iface_filter = iwl_mvm_config_iface_filter,
4250 .bss_info_changed = iwl_mvm_bss_info_changed,
4251 .hw_scan = iwl_mvm_mac_hw_scan,
4252 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4253 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4254 .sta_state = iwl_mvm_mac_sta_state,
4255 .sta_notify = iwl_mvm_mac_sta_notify,
4256 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4257 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4258 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4259 .sta_rc_update = iwl_mvm_sta_rc_update,
4260 .conf_tx = iwl_mvm_mac_conf_tx,
4261 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4262 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4263 .flush = iwl_mvm_mac_flush,
4264 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4265 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4266 .set_key = iwl_mvm_mac_set_key,
4267 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4268 .remain_on_channel = iwl_mvm_roc,
4269 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4270 .add_chanctx = iwl_mvm_add_chanctx,
4271 .remove_chanctx = iwl_mvm_remove_chanctx,
4272 .change_chanctx = iwl_mvm_change_chanctx,
4273 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4274 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4275 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4276
4277 .start_ap = iwl_mvm_start_ap_ibss,
4278 .stop_ap = iwl_mvm_stop_ap_ibss,
4279 .join_ibss = iwl_mvm_start_ap_ibss,
4280 .leave_ibss = iwl_mvm_stop_ap_ibss,
4281
4282 .set_tim = iwl_mvm_set_tim,
4283
4284 .channel_switch = iwl_mvm_channel_switch,
4285 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4286 .post_channel_switch = iwl_mvm_post_channel_switch,
4287
4288 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4289 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4290 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4291
4292 .event_callback = iwl_mvm_mac_event_callback,
4293
4294 .sync_rx_queues = iwl_mvm_sync_rx_queues,
4295
4296 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4297
4298 #ifdef CONFIG_PM_SLEEP
4299 /* these are implemented in d3.c */
4300 .suspend = iwl_mvm_suspend,
4301 .resume = iwl_mvm_resume,
4302 .set_wakeup = iwl_mvm_set_wakeup,
4303 .set_rekey_data = iwl_mvm_set_rekey_data,
4304 #if IS_ENABLED(CONFIG_IPV6)
4305 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4306 #endif
4307 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4308 #endif
4309 .get_survey = iwl_mvm_mac_get_survey,
4310 .sta_statistics = iwl_mvm_mac_sta_statistics,
4311 };