drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66 #include <linux/kernel.h>
67 #include <linux/slab.h>
68 #include <linux/skbuff.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/ip.h>
72 #include <linux/if_arp.h>
73 #include <linux/time.h>
74 #include <net/mac80211.h>
75 #include <net/ieee80211_radiotap.h>
76 #include <net/tcp.h>
77
78 #include "iwl-op-mode.h"
79 #include "iwl-io.h"
80 #include "mvm.h"
81 #include "sta.h"
82 #include "time-event.h"
83 #include "iwl-eeprom-parse.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-nvm-parse.h"
89 #include "fw-dbg.h"
90
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 {
93 .max = 1,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_AP) |
99 BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 BIT(NL80211_IFTYPE_P2P_GO),
101 },
102 {
103 .max = 1,
104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 },
106 };
107
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 {
110 .num_different_channels = 2,
111 .max_interfaces = 3,
112 .limits = iwl_mvm_limits,
113 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 },
115 };
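/*
 * Note: with the limits above, mac80211 will allow at most one managed
 * station, one AP/P2P interface and one P2P_DEVICE to coexist, spread
 * over at most two different channels.
 */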
116
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 .min_len = 0,
121 .max_len = 255,
122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 .tok = &iwl_mvm_wowlan_tcp_token_feature,
127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 sizeof(struct ethhdr) -
129 sizeof(struct iphdr) -
130 sizeof(struct tcphdr),
131 .data_interval_max = 65535, /* __le16 in API */
132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 sizeof(struct ethhdr) -
134 sizeof(struct iphdr) -
135 sizeof(struct tcphdr),
136 .seq = true,
137 };
138 #endif
139
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142 * Use the reserved field to indicate magic values.
143 * These values will only be used internally by the driver,
144 * and won't make it to the fw (reserved will be 0).
145 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
146 * be the vif's IP address. In case there is not exactly one
147 * IP address (zero, or more than one), this attribute will
148 * be skipped.
149 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
150 * the LSB bytes of the vif's MAC address.
151 */
152 enum {
153 BC_FILTER_MAGIC_NONE = 0,
154 BC_FILTER_MAGIC_IP,
155 BC_FILTER_MAGIC_MAC,
156 };
157
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 {
160 /* arp */
161 .discard = 0,
162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 .attrs = {
164 {
165 /* frame type - arp, hw type - ethernet */
166 .offset_type =
167 BCAST_FILTER_OFFSET_PAYLOAD_START,
168 .offset = sizeof(rfc1042_header),
169 .val = cpu_to_be32(0x08060001),
170 .mask = cpu_to_be32(0xffffffff),
171 },
172 {
173 /* arp dest ip */
174 .offset_type =
175 BCAST_FILTER_OFFSET_PAYLOAD_START,
176 .offset = sizeof(rfc1042_header) + 2 +
177 sizeof(struct arphdr) +
178 ETH_ALEN + sizeof(__be32) +
179 ETH_ALEN,
180 .mask = cpu_to_be32(0xffffffff),
181 /* mark it as special field */
182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 },
184 },
185 },
186 {
187 /* dhcp offer bcast */
188 .discard = 0,
189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 .attrs = {
191 {
192 /* udp dest port - 68 (bootp client)*/
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = offsetof(struct udphdr, dest),
195 .val = cpu_to_be32(0x00440000),
196 .mask = cpu_to_be32(0xffff0000),
197 },
198 {
199 /* dhcp - lsb bytes of client hw address */
200 .offset_type = BCAST_FILTER_OFFSET_IP_END,
201 .offset = 38,
202 .mask = cpu_to_be32(0xffffffff),
203 /* mark it as special field */
204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 },
206 },
207 },
208 /* last filter must be empty */
209 {},
210 };
211 #endif
212
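/*
 * D0i3 reference accounting: every iwl_mvm_ref() below must be balanced by
 * an iwl_mvm_unref() of the same ref_type. The per-type counters are only
 * book-keeping; the actual runtime-PM reference is taken on the transport
 * via iwl_trans_ref()/iwl_trans_unref().
 */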
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 if (!iwl_mvm_is_d0i3_supported(mvm))
216 return;
217
218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 spin_lock_bh(&mvm->refs_lock);
220 mvm->refs[ref_type]++;
221 spin_unlock_bh(&mvm->refs_lock);
222 iwl_trans_ref(mvm->trans);
223 }
224
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 if (!iwl_mvm_is_d0i3_supported(mvm))
228 return;
229
230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 spin_lock_bh(&mvm->refs_lock);
232 if (WARN_ON(!mvm->refs[ref_type])) {
233 spin_unlock_bh(&mvm->refs_lock);
234 return;
235 }
236 mvm->refs[ref_type]--;
237 spin_unlock_bh(&mvm->refs_lock);
238 iwl_trans_unref(mvm->trans);
239 }
240
241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
242 enum iwl_mvm_ref_type except_ref)
243 {
244 int i, j;
245
246 if (!iwl_mvm_is_d0i3_supported(mvm))
247 return;
248
249 spin_lock_bh(&mvm->refs_lock);
250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
251 if (except_ref == i || !mvm->refs[i])
252 continue;
253
254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
255 i, mvm->refs[i]);
256 for (j = 0; j < mvm->refs[i]; j++)
257 iwl_trans_unref(mvm->trans);
258 mvm->refs[i] = 0;
259 }
260 spin_unlock_bh(&mvm->refs_lock);
261 }
262
263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
264 {
265 int i;
266 bool taken = false;
267
268 if (!iwl_mvm_is_d0i3_supported(mvm))
269 return true;
270
271 spin_lock_bh(&mvm->refs_lock);
272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
273 if (mvm->refs[i]) {
274 taken = true;
275 break;
276 }
277 }
278 spin_unlock_bh(&mvm->refs_lock);
279
280 return taken;
281 }
282
283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
284 {
285 iwl_mvm_ref(mvm, ref_type);
286
287 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
288 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
289 HZ)) {
290 WARN_ON_ONCE(1);
291 iwl_mvm_unref(mvm, ref_type);
292 return -EIO;
293 }
294
295 return 0;
296 }
297
298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
299 {
300 int i;
301
302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
303 for (i = 0; i < NUM_PHY_CTX; i++) {
304 mvm->phy_ctxts[i].id = i;
305 mvm->phy_ctxts[i].ref = 0;
306 }
307 }
308
309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
310 const char *alpha2,
311 enum iwl_mcc_source src_id,
312 bool *changed)
313 {
314 struct ieee80211_regdomain *regd = NULL;
315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
317 struct iwl_mcc_update_resp *resp;
318
319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
320
321 lockdep_assert_held(&mvm->mutex);
322
323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
324 if (IS_ERR_OR_NULL(resp)) {
325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
326 PTR_ERR_OR_ZERO(resp));
327 goto out;
328 }
329
330 if (changed)
331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
332
333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
334 __le32_to_cpu(resp->n_channels),
335 resp->channels,
336 __le16_to_cpu(resp->mcc));
337 /* Store the return source id */
338 src_id = resp->source_id;
339 kfree(resp);
340 if (IS_ERR_OR_NULL(regd)) {
341 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
342 PTR_ERR_OR_ZERO(regd));
343 goto out;
344 }
345
346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
348 mvm->lar_regdom_set = true;
349 mvm->mcc_src = src_id;
350
351 out:
352 return regd;
353 }
354
355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
356 {
357 bool changed;
358 struct ieee80211_regdomain *regd;
359
360 if (!iwl_mvm_is_lar_supported(mvm))
361 return;
362
363 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
364 if (!IS_ERR_OR_NULL(regd)) {
365 /* only update the regulatory core if changed */
366 if (changed)
367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
368
369 kfree(regd);
370 }
371 }
372
373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
374 bool *changed)
375 {
376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
377 iwl_mvm_is_wifi_mcc_supported(mvm) ?
378 MCC_SOURCE_GET_CURRENT :
379 MCC_SOURCE_OLD_FW, changed);
380 }
381
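/*
 * Restore the last regulatory domain (MCC) that cfg80211 knows about into
 * the firmware, e.g. after a firmware restart. Returns -ENOENT when no
 * regdomain has been set yet.
 */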
382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
383 {
384 enum iwl_mcc_source used_src;
385 struct ieee80211_regdomain *regd;
386 int ret;
387 bool changed;
388 const struct ieee80211_regdomain *r =
389 rtnl_dereference(mvm->hw->wiphy->regd);
390
391 if (!r)
392 return -ENOENT;
393
394 /* save the last source in case we overwrite it below */
395 used_src = mvm->mcc_src;
396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
397 /* Notify the firmware we support wifi location updates */
398 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
399 if (!IS_ERR_OR_NULL(regd))
400 kfree(regd);
401 }
402
403 /* Now set our last stored MCC and source */
404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
405 &changed);
406 if (IS_ERR_OR_NULL(regd))
407 return -EIO;
408
409 /* update cfg80211 if the regdomain was changed */
410 if (changed)
411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
412 else
413 ret = 0;
414
415 kfree(regd);
416 return ret;
417 }
418
419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
420 {
421 struct ieee80211_hw *hw = mvm->hw;
422 int num_mac, ret, i;
423 static const u32 mvm_ciphers[] = {
424 WLAN_CIPHER_SUITE_WEP40,
425 WLAN_CIPHER_SUITE_WEP104,
426 WLAN_CIPHER_SUITE_TKIP,
427 WLAN_CIPHER_SUITE_CCMP,
428 };
429
430 /* Tell mac80211 our characteristics */
431 ieee80211_hw_set(hw, SIGNAL_DBM);
432 ieee80211_hw_set(hw, SPECTRUM_MGMT);
433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
434 ieee80211_hw_set(hw, QUEUE_CONTROL);
435 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
436 ieee80211_hw_set(hw, SUPPORTS_PS);
437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
438 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
440 ieee80211_hw_set(hw, CONNECTION_MONITOR);
441 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
446 if (iwl_mvm_has_new_rx_api(mvm))
447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
448 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
449 ieee80211_hw_set(hw, AP_LINK_PS);
450
451 if (mvm->trans->num_rx_queues > 1)
452 ieee80211_hw_set(hw, USES_RSS);
453
454 if (mvm->trans->max_skb_frags)
455 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
456
457 if (!iwl_mvm_is_dqa_supported(mvm))
458 hw->queues = mvm->first_agg_queue;
459 else
460 hw->queues = IEEE80211_MAX_QUEUES;
461 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
462 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
463 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
464 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
465 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
466
467 hw->radiotap_timestamp.units_pos =
468 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
469 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
470 /* 22 is the accuracy for CCK frames; it's better (only 8) for OFDM */
471 hw->radiotap_timestamp.accuracy = 22;
472
473 hw->rate_control_algorithm = "iwl-mvm-rs";
474 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
475 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
476
477 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
478 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
479 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
480 hw->wiphy->cipher_suites = mvm->ciphers;
481
482 if (iwl_mvm_has_new_rx_api(mvm)) {
483 mvm->ciphers[hw->wiphy->n_cipher_suites] =
484 WLAN_CIPHER_SUITE_GCMP;
485 hw->wiphy->n_cipher_suites++;
486 mvm->ciphers[hw->wiphy->n_cipher_suites] =
487 WLAN_CIPHER_SUITE_GCMP_256;
488 hw->wiphy->n_cipher_suites++;
489 }
490
491 /* Enable 11w if software crypto is not enabled (as the
492 * firmware will interpret some mgmt packets, so enabling it
493 * with software crypto isn't safe).
494 */
495 if (!iwlwifi_mod_params.sw_crypto) {
496 ieee80211_hw_set(hw, MFP_CAPABLE);
497 mvm->ciphers[hw->wiphy->n_cipher_suites] =
498 WLAN_CIPHER_SUITE_AES_CMAC;
499 hw->wiphy->n_cipher_suites++;
500 if (iwl_mvm_has_new_rx_api(mvm)) {
501 mvm->ciphers[hw->wiphy->n_cipher_suites] =
502 WLAN_CIPHER_SUITE_BIP_GMAC_128;
503 hw->wiphy->n_cipher_suites++;
504 mvm->ciphers[hw->wiphy->n_cipher_suites] =
505 WLAN_CIPHER_SUITE_BIP_GMAC_256;
506 hw->wiphy->n_cipher_suites++;
507 }
508 }
509
510 /* currently FW API supports only one optional cipher scheme */
511 if (mvm->fw->cs[0].cipher) {
512 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
513 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
514
515 mvm->hw->n_cipher_schemes = 1;
516
517 cs->cipher = le32_to_cpu(fwcs->cipher);
518 cs->iftype = BIT(NL80211_IFTYPE_STATION);
519 cs->hdr_len = fwcs->hdr_len;
520 cs->pn_len = fwcs->pn_len;
521 cs->pn_off = fwcs->pn_off;
522 cs->key_idx_off = fwcs->key_idx_off;
523 cs->key_idx_mask = fwcs->key_idx_mask;
524 cs->key_idx_shift = fwcs->key_idx_shift;
525 cs->mic_len = fwcs->mic_len;
526
527 mvm->hw->cipher_schemes = mvm->cs;
528 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
529 hw->wiphy->n_cipher_suites++;
530 }
531
532 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
533 hw->wiphy->features |=
534 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
535 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
536 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
537
538 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
539 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
540 hw->chanctx_data_size = sizeof(u16);
541
542 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
543 BIT(NL80211_IFTYPE_P2P_CLIENT) |
544 BIT(NL80211_IFTYPE_AP) |
545 BIT(NL80211_IFTYPE_P2P_GO) |
546 BIT(NL80211_IFTYPE_P2P_DEVICE) |
547 BIT(NL80211_IFTYPE_ADHOC);
548
549 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
550 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
551 if (iwl_mvm_is_lar_supported(mvm))
552 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
553 else
554 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
555 REGULATORY_DISABLE_BEACON_HINTS;
556
557 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
558 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
559
560 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
561 hw->wiphy->n_iface_combinations =
562 ARRAY_SIZE(iwl_mvm_iface_combinations);
563
564 hw->wiphy->max_remain_on_channel_duration = 10000;
565 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
566 /* we can compensate an offset of up to 3 channels = 15 MHz */
567 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
568
569 /* Extract MAC address */
570 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
571 hw->wiphy->addresses = mvm->addresses;
572 hw->wiphy->n_addresses = 1;
573
574 /* Extract additional MAC addresses if available */
575 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
576 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
577
578 for (i = 1; i < num_mac; i++) {
579 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
580 ETH_ALEN);
581 mvm->addresses[i].addr[5]++;
582 hw->wiphy->n_addresses++;
583 }
584
585 iwl_mvm_reset_phy_ctxts(mvm);
586
587 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
588
589 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
590
591 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
592 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
593 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
594
595 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
596 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
597 else
598 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
599
600 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
601 hw->wiphy->bands[NL80211_BAND_2GHZ] =
602 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
603 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
604 hw->wiphy->bands[NL80211_BAND_5GHZ] =
605 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
606
607 if (fw_has_capa(&mvm->fw->ucode_capa,
608 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
609 fw_has_api(&mvm->fw->ucode_capa,
610 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
611 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
612 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
613 }
614
615 hw->wiphy->hw_version = mvm->trans->hw_id;
616
617 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
618 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
619 else
620 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
621
622 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
623 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
624 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
625 /* we create the 802.11 header and a zero-length SSID IE. */
626 hw->wiphy->max_sched_scan_ie_len =
627 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
628 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
629 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
630
631 /*
632 * the firmware uses a u8 for the number of iterations, but 0xff is reserved
633 * for an infinite loop, so the maximum number of iterations is actually 254.
634 */
635 hw->wiphy->max_sched_scan_plan_iterations = 254;
636
637 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
638 NL80211_FEATURE_LOW_PRIORITY_SCAN |
639 NL80211_FEATURE_P2P_GO_OPPPS |
640 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
641 NL80211_FEATURE_DYNAMIC_SMPS |
642 NL80211_FEATURE_STATIC_SMPS |
643 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
644
645 if (fw_has_capa(&mvm->fw->ucode_capa,
646 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
647 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
650 hw->wiphy->features |= NL80211_FEATURE_QUIET;
651
652 if (fw_has_capa(&mvm->fw->ucode_capa,
653 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
654 hw->wiphy->features |=
655 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
656
657 if (fw_has_capa(&mvm->fw->ucode_capa,
658 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
659 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
660
661 if (fw_has_api(&mvm->fw->ucode_capa,
662 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
663 wiphy_ext_feature_set(hw->wiphy,
664 NL80211_EXT_FEATURE_SCAN_START_TIME);
665 wiphy_ext_feature_set(hw->wiphy,
666 NL80211_EXT_FEATURE_BSS_PARENT_TSF);
667 wiphy_ext_feature_set(hw->wiphy,
668 NL80211_EXT_FEATURE_SET_SCAN_DWELL);
669 }
670
671 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
672
673 #ifdef CONFIG_PM_SLEEP
674 if (iwl_mvm_is_d0i3_supported(mvm) &&
675 device_can_wakeup(mvm->trans->dev)) {
676 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
677 hw->wiphy->wowlan = &mvm->wowlan;
678 }
679
680 if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
681 mvm->trans->ops->d3_suspend &&
682 mvm->trans->ops->d3_resume &&
683 device_can_wakeup(mvm->trans->dev)) {
684 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
685 WIPHY_WOWLAN_DISCONNECT |
686 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
687 WIPHY_WOWLAN_RFKILL_RELEASE |
688 WIPHY_WOWLAN_NET_DETECT;
689 if (!iwlwifi_mod_params.sw_crypto)
690 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
691 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
692 WIPHY_WOWLAN_4WAY_HANDSHAKE;
693
694 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
695 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
696 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
697 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
698 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
699 hw->wiphy->wowlan = &mvm->wowlan;
700 }
701 #endif
702
703 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
704 /* assign default bcast filtering configuration */
705 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
706 #endif
707
708 ret = iwl_mvm_leds_init(mvm);
709 if (ret)
710 return ret;
711
712 if (fw_has_capa(&mvm->fw->ucode_capa,
713 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
714 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
715 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
716 ieee80211_hw_set(hw, TDLS_WIDER_BW);
717 }
718
719 if (fw_has_capa(&mvm->fw->ucode_capa,
720 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
721 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
722 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
723 }
724
725 hw->netdev_features |= mvm->cfg->features;
726 if (!iwl_mvm_is_csum_supported(mvm)) {
727 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
728 NETIF_F_RXCSUM);
729 /* We may support SW TX CSUM */
730 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
731 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
732 }
733
734 ret = ieee80211_register_hw(mvm->hw);
735 if (ret)
736 iwl_mvm_leds_exit(mvm);
737
738 if (mvm->cfg->vht_mu_mimo_supported)
739 wiphy_ext_feature_set(hw->wiphy,
740 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
741
742 return ret;
743 }
744
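/*
 * While the firmware is in D0i3, frames destined to the AP station are
 * queued on mvm->d0i3_tx instead of being transmitted; queueing one also
 * triggers a D0i3 exit (via the ref/unref pair below), after which the
 * deferred frames are flushed out.
 */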
745 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
746 struct ieee80211_sta *sta,
747 struct sk_buff *skb)
748 {
749 struct iwl_mvm_sta *mvmsta;
750 bool defer = false;
751
752 /*
753 * double check the IN_D0I3 flag both before and after
754 * taking the spinlock, in order to prevent taking
755 * the spinlock when not needed.
756 */
757 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
758 return false;
759
760 spin_lock(&mvm->d0i3_tx_lock);
761 /*
762 * testing the flag again ensures the skb dequeue
763 * loop (on d0i3 exit) hasn't run yet.
764 */
765 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
766 goto out;
767
768 mvmsta = iwl_mvm_sta_from_mac80211(sta);
769 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
770 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
771 goto out;
772
773 __skb_queue_tail(&mvm->d0i3_tx, skb);
774 ieee80211_stop_queues(mvm->hw);
775
776 /* trigger wakeup */
777 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
778 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
779
780 defer = true;
781 out:
782 spin_unlock(&mvm->d0i3_tx_lock);
783 return defer;
784 }
785
786 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
787 struct ieee80211_tx_control *control,
788 struct sk_buff *skb)
789 {
790 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
791 struct ieee80211_sta *sta = control->sta;
792 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
793 struct ieee80211_hdr *hdr = (void *)skb->data;
794
795 if (iwl_mvm_is_radio_killed(mvm)) {
796 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
797 goto drop;
798 }
799
800 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
801 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
802 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
803 goto drop;
804
805 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
806 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
807 ieee80211_is_mgmt(hdr->frame_control) &&
808 !ieee80211_is_deauth(hdr->frame_control) &&
809 !ieee80211_is_disassoc(hdr->frame_control) &&
810 !ieee80211_is_action(hdr->frame_control)))
811 sta = NULL;
812
813 if (sta) {
814 if (iwl_mvm_defer_tx(mvm, sta, skb))
815 return;
816 if (iwl_mvm_tx_skb(mvm, skb, sta))
817 goto drop;
818 return;
819 }
820
821 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
822 goto drop;
823 return;
824 drop:
825 ieee80211_free_txskb(hw, skb);
826 }
827
828 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
829 {
830 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
831 return false;
832 return true;
833 }
834
835 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
836 {
837 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
838 return false;
839 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
840 return true;
841
842 /* enabled by default */
843 return true;
844 }
845
846 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
847 do { \
848 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
849 break; \
850 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
851 } while (0)
852
853 static void
854 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
855 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
856 enum ieee80211_ampdu_mlme_action action)
857 {
858 struct iwl_fw_dbg_trigger_tlv *trig;
859 struct iwl_fw_dbg_trigger_ba *ba_trig;
860
861 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
862 return;
863
864 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
865 ba_trig = (void *)trig->data;
866
867 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
868 return;
869
870 switch (action) {
871 case IEEE80211_AMPDU_TX_OPERATIONAL: {
872 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
873 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
874
875 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
876 "TX AGG START: MAC %pM tid %d ssn %d\n",
877 sta->addr, tid, tid_data->ssn);
878 break;
879 }
880 case IEEE80211_AMPDU_TX_STOP_CONT:
881 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
882 "TX AGG STOP: MAC %pM tid %d\n",
883 sta->addr, tid);
884 break;
885 case IEEE80211_AMPDU_RX_START:
886 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
887 "RX AGG START: MAC %pM tid %d ssn %d\n",
888 sta->addr, tid, rx_ba_ssn);
889 break;
890 case IEEE80211_AMPDU_RX_STOP:
891 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
892 "RX AGG STOP: MAC %pM tid %d\n",
893 sta->addr, tid);
894 break;
895 default:
896 break;
897 }
898 }
899
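/*
 * mac80211 A-MPDU state machine entry point: RX aggregation is configured
 * directly in the firmware via iwl_mvm_sta_rx_agg(), while the TX
 * aggregation transitions go through the per-TID state kept in iwl_mvm_sta.
 */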
900 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
901 struct ieee80211_vif *vif,
902 struct ieee80211_ampdu_params *params)
903 {
904 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
905 int ret;
906 bool tx_agg_ref = false;
907 struct ieee80211_sta *sta = params->sta;
908 enum ieee80211_ampdu_mlme_action action = params->action;
909 u16 tid = params->tid;
910 u16 *ssn = &params->ssn;
911 u8 buf_size = params->buf_size;
912 bool amsdu = params->amsdu;
913 u16 timeout = params->timeout;
914
915 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
916 sta->addr, tid, action);
917
918 if (!(mvm->nvm_data->sku_cap_11n_enable))
919 return -EACCES;
920
921 /* return from D0i3 before starting a new Tx aggregation */
922 switch (action) {
923 case IEEE80211_AMPDU_TX_START:
924 case IEEE80211_AMPDU_TX_STOP_CONT:
925 case IEEE80211_AMPDU_TX_STOP_FLUSH:
926 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
927 case IEEE80211_AMPDU_TX_OPERATIONAL:
928 /*
929 * for tx start, wait synchronously until D0i3 exit to
930 * get the correct sequence number for the tid.
931 * additionally, some other ampdu actions use direct
932 * target access, which is not handled automatically
933 * by the trans layer (unlike commands), so wait for
934 * d0i3 exit in these cases as well.
935 */
936 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
937 if (ret)
938 return ret;
939
940 tx_agg_ref = true;
941 break;
942 default:
943 break;
944 }
945
946 mutex_lock(&mvm->mutex);
947
948 switch (action) {
949 case IEEE80211_AMPDU_RX_START:
950 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
951 ret = -EINVAL;
952 break;
953 }
954 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
955 timeout);
956 break;
957 case IEEE80211_AMPDU_RX_STOP:
958 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
959 timeout);
960 break;
961 case IEEE80211_AMPDU_TX_START:
962 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
963 ret = -EINVAL;
964 break;
965 }
966 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
967 break;
968 case IEEE80211_AMPDU_TX_STOP_CONT:
969 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
970 break;
971 case IEEE80211_AMPDU_TX_STOP_FLUSH:
972 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
973 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
974 break;
975 case IEEE80211_AMPDU_TX_OPERATIONAL:
976 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
977 buf_size, amsdu);
978 break;
979 default:
980 WARN_ON_ONCE(1);
981 ret = -EINVAL;
982 break;
983 }
984
985 if (!ret) {
986 u16 rx_ba_ssn = 0;
987
988 if (action == IEEE80211_AMPDU_RX_START)
989 rx_ba_ssn = *ssn;
990
991 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
992 rx_ba_ssn, action);
993 }
994 mutex_unlock(&mvm->mutex);
995
996 /*
997 * If the tid is marked as started, we won't use it for offloaded
998 * traffic on the next D0i3 entry. It's safe to unref.
999 */
1000 if (tx_agg_ref)
1001 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
1002
1003 return ret;
1004 }
1005
1006 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
1007 struct ieee80211_vif *vif)
1008 {
1009 struct iwl_mvm *mvm = data;
1010 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1011
1012 mvmvif->uploaded = false;
1013 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1014
1015 spin_lock_bh(&mvm->time_event_lock);
1016 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
1017 spin_unlock_bh(&mvm->time_event_lock);
1018
1019 mvmvif->phy_ctxt = NULL;
1020 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
1021 }
1022
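/*
 * Bring the driver back to a clean state after mac80211 requested a HW
 * restart: stop the device, drop all stale references and time events,
 * reset per-interface state and clear book-keeping the restarted firmware
 * will no longer know about.
 */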
1023 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1024 {
1025 /* clear the D3 reconfig flag; we only need it to avoid dumping a
1026 * firmware coredump on reconfiguration, and we shouldn't do that
1027 * on a D3->D0 transition
1028 */
1029 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1030 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1031 iwl_mvm_fw_error_dump(mvm);
1032 }
1033
1034 /* cleanup all stale references (scan, roc), but keep the
1035 * ucode_down ref until reconfig is complete
1036 */
1037 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1038
1039 iwl_mvm_stop_device(mvm);
1040
1041 mvm->scan_status = 0;
1042 mvm->ps_disabled = false;
1043 mvm->calibrating = false;
1044
1045 /* just in case one was running */
1046 iwl_mvm_cleanup_roc_te(mvm);
1047 ieee80211_remain_on_channel_expired(mvm->hw);
1048
1049 /*
1050 * cleanup all interfaces, even inactive ones, as some might have
1051 * gone down during the HW restart
1052 */
1053 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1054
1055 mvm->p2p_device_vif = NULL;
1056 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1057
1058 iwl_mvm_reset_phy_ctxts(mvm);
1059 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1060 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1061 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1062 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1063 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1064 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1065
1066 ieee80211_wake_queues(mvm->hw);
1067
1068 /* clear any stale d0i3 state */
1069 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1070
1071 mvm->vif_count = 0;
1072 mvm->rx_ba_sessions = 0;
1073 mvm->fw_dbg_conf = FW_DBG_INVALID;
1074
1075 /* keep statistics ticking */
1076 iwl_mvm_accu_radio_stats(mvm);
1077 }
1078
1079 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1080 {
1081 int ret;
1082
1083 lockdep_assert_held(&mvm->mutex);
1084
1085 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1086 /* Clean up some internal and mac80211 state on restart */
1087 iwl_mvm_restart_cleanup(mvm);
1088 } else {
1089 /* Hold the reference to prevent runtime suspend while
1090 * the start procedure runs. It's a bit confusing
1091 * that the UCODE_DOWN reference is taken, but it just
1092 * means "UCODE is not UP yet". ( TODO: rename this
1093 * reference).
1094 */
1095 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1096 }
1097 ret = iwl_mvm_up(mvm);
1098
1099 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1100 /* Something went wrong - we need to finish some cleanup
1101 * that normally iwl_mvm_mac_restart_complete() below
1102 * would do.
1103 */
1104 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1105 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1106 }
1107
1108 return ret;
1109 }
1110
1111 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1112 {
1113 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1114 int ret;
1115
1116 /* Some hw restart cleanups must not hold the mutex */
1117 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1118 /*
1119 * Make sure we are out of d0i3. This is needed
1120 * to make sure the reference accounting is correct
1121 * (and there is no stale d0i3_exit_work).
1122 */
1123 wait_event_timeout(mvm->d0i3_exit_waitq,
1124 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1125 &mvm->status),
1126 HZ);
1127 }
1128
1129 mutex_lock(&mvm->mutex);
1130 ret = __iwl_mvm_mac_start(mvm);
1131 mutex_unlock(&mvm->mutex);
1132
1133 return ret;
1134 }
1135
1136 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1137 {
1138 int ret;
1139
1140 mutex_lock(&mvm->mutex);
1141
1142 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1143 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1144 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1145 if (ret)
1146 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1147 ret);
1148
1149 /* allow transport/FW low power modes */
1150 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1151
1152 /*
1153 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1154 * of packets the FW sent out, so we must reconnect.
1155 */
1156 iwl_mvm_teardown_tdls_peers(mvm);
1157
1158 mutex_unlock(&mvm->mutex);
1159 }
1160
1161 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1162 {
1163 if (iwl_mvm_is_d0i3_supported(mvm) &&
1164 iwl_mvm_enter_d0i3_on_suspend(mvm))
1165 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1166 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1167 &mvm->status),
1168 HZ),
1169 "D0i3 exit on resume timed out\n");
1170 }
1171
1172 static void
1173 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1174 enum ieee80211_reconfig_type reconfig_type)
1175 {
1176 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1177
1178 switch (reconfig_type) {
1179 case IEEE80211_RECONFIG_TYPE_RESTART:
1180 iwl_mvm_restart_complete(mvm);
1181 break;
1182 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1183 iwl_mvm_resume_complete(mvm);
1184 break;
1185 }
1186 }
1187
1188 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1189 {
1190 lockdep_assert_held(&mvm->mutex);
1191
1192 /* firmware counters are obviously reset now, but we shouldn't keep
1193 * the accumulated statistics partially tracked either, so clear them too.
1194 */
1195 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1196
1197 /* async_handlers_wk is now blocked */
1198
1199 /*
1200 * The work item could be running or queued if the
1201 * ROC time event stops just as we get here.
1202 */
1203 flush_work(&mvm->roc_done_wk);
1204
1205 iwl_mvm_stop_device(mvm);
1206
1207 iwl_mvm_async_handlers_purge(mvm);
1208 /* async_handlers_list is empty and will stay empty: HW is stopped */
1209
1210 /* the fw is stopped, the aux sta is dead: clean up driver state */
1211 iwl_mvm_del_aux_sta(mvm);
1212
1213 /*
1214 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1215 * won't be called in this case).
1216 * But make sure to clean up interfaces that went down before or while
1217 * the HW restart was requested.
1218 */
1219 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1220 ieee80211_iterate_interfaces(mvm->hw, 0,
1221 iwl_mvm_cleanup_iterator, mvm);
1222
1223 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1224 * make sure there's nothing left there and warn if any is found.
1225 */
1226 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1227 int i;
1228
1229 for (i = 0; i < mvm->max_scans; i++) {
1230 if (WARN_ONCE(mvm->scan_uid_status[i],
1231 "UMAC scan UID %d status was not cleaned\n",
1232 i))
1233 mvm->scan_uid_status[i] = 0;
1234 }
1235 }
1236 }
1237
1238 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1239 {
1240 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1241
1242 flush_work(&mvm->d0i3_exit_work);
1243 flush_work(&mvm->async_handlers_wk);
1244 flush_work(&mvm->add_stream_wk);
1245 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1246 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1247 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1248 iwl_mvm_free_fw_dump_desc(mvm);
1249
1250 mutex_lock(&mvm->mutex);
1251 __iwl_mvm_mac_stop(mvm);
1252 mutex_unlock(&mvm->mutex);
1253
1254 /*
1255 * The worker might have been waiting for the mutex, let it run and
1256 * discover that its list is now empty.
1257 */
1258 cancel_work_sync(&mvm->async_handlers_wk);
1259 }
1260
1261 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1262 {
1263 u16 i;
1264
1265 lockdep_assert_held(&mvm->mutex);
1266
1267 for (i = 0; i < NUM_PHY_CTX; i++)
1268 if (!mvm->phy_ctxts[i].ref)
1269 return &mvm->phy_ctxts[i];
1270
1271 IWL_ERR(mvm, "No available PHY context\n");
1272 return NULL;
1273 }
1274
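/*
 * Note: pwr_restriction below is scaled by 8, i.e. the firmware presumably
 * takes the limit in units of 1/8 dBm; IWL_DEV_MAX_TX_POWER appears to mean
 * "no restriction".
 */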
1275 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1276 s16 tx_power)
1277 {
1278 struct iwl_dev_tx_power_cmd cmd = {
1279 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1280 .v3.mac_context_id =
1281 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1282 .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
1283 };
1284 int len = sizeof(cmd);
1285
1286 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1287 cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1288
1289 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1290 len = sizeof(cmd.v3);
1291
1292 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1293 }
1294
1295 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1296 struct ieee80211_vif *vif)
1297 {
1298 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1299 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1300 int ret;
1301
1302 mvmvif->mvm = mvm;
1303
1304 /*
1305 * make sure D0i3 exit is completed, otherwise a target access
1306 * during tx queue configuration could be done when still in
1307 * D0i3 state.
1308 */
1309 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1310 if (ret)
1311 return ret;
1312
1313 /*
1314 * Not much to do here. The stack will not allow interface
1315 * types or combinations that we didn't advertise, so we
1316 * don't really have to check the types.
1317 */
1318
1319 mutex_lock(&mvm->mutex);
1320
1321 /* make sure that beacon statistics don't go backwards with FW reset */
1322 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1323 mvmvif->beacon_stats.accu_num_beacons +=
1324 mvmvif->beacon_stats.num_beacons;
1325
1326 /* Allocate resources for the MAC context, and add it to the fw */
1327 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1328 if (ret)
1329 goto out_unlock;
1330
1331 /* Counting number of interfaces is needed for legacy PM */
1332 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1333 mvm->vif_count++;
1334
1335 /*
1336 * The AP binding flow can be done only after the beacon
1337 * template is configured (which happens only in the mac80211
1338 * start_ap() flow), and adding the broadcast station can happen
1339 * only after the binding.
1340 * In addition, since modifying the MAC before adding a bcast
1341 * station is not allowed by the FW, delay the adding of MAC context to
1342 * the point where we can also add the bcast station.
1343 * In short: there's not much we can do at this point, other than
1344 * allocating resources :)
1345 */
1346 if (vif->type == NL80211_IFTYPE_AP ||
1347 vif->type == NL80211_IFTYPE_ADHOC) {
1348 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1349 if (ret) {
1350 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1351 goto out_release;
1352 }
1353
1354 iwl_mvm_vif_dbgfs_register(mvm, vif);
1355 goto out_unlock;
1356 }
1357
1358 mvmvif->features |= hw->netdev_features;
1359
1360 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1361 if (ret)
1362 goto out_release;
1363
1364 ret = iwl_mvm_power_update_mac(mvm);
1365 if (ret)
1366 goto out_remove_mac;
1367
1368 /* beacon filtering */
1369 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1370 if (ret)
1371 goto out_remove_mac;
1372
1373 if (!mvm->bf_allowed_vif &&
1374 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1375 mvm->bf_allowed_vif = mvmvif;
1376 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1377 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1378 }
1379
1380 /*
1381 * P2P_DEVICE interface does not have a channel context assigned to it,
1382 * so a dedicated PHY context is allocated to it and the corresponding
1383 * MAC context is bound to it at this stage.
1384 */
1385 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1386
1387 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1388 if (!mvmvif->phy_ctxt) {
1389 ret = -ENOSPC;
1390 goto out_free_bf;
1391 }
1392
1393 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1394 ret = iwl_mvm_binding_add_vif(mvm, vif);
1395 if (ret)
1396 goto out_unref_phy;
1397
1398 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1399 if (ret)
1400 goto out_unbind;
1401
1402 /* Save a pointer to p2p device vif, so it can later be used to
1403 * update the p2p device MAC when a GO is started/stopped */
1404 mvm->p2p_device_vif = vif;
1405 }
1406
1407 iwl_mvm_vif_dbgfs_register(mvm, vif);
1408 goto out_unlock;
1409
1410 out_unbind:
1411 iwl_mvm_binding_remove_vif(mvm, vif);
1412 out_unref_phy:
1413 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1414 out_free_bf:
1415 if (mvm->bf_allowed_vif == mvmvif) {
1416 mvm->bf_allowed_vif = NULL;
1417 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1418 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1419 }
1420 out_remove_mac:
1421 mvmvif->phy_ctxt = NULL;
1422 iwl_mvm_mac_ctxt_remove(mvm, vif);
1423 out_release:
1424 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1425 mvm->vif_count--;
1426
1427 iwl_mvm_mac_ctxt_release(mvm, vif);
1428 out_unlock:
1429 mutex_unlock(&mvm->mutex);
1430
1431 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1432
1433 return ret;
1434 }
1435
1436 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1437 struct ieee80211_vif *vif)
1438 {
1439 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1440
1441 if (tfd_msk) {
1442 /*
1443 * mac80211 first removes all the stations of the vif and
1444 * then removes the vif. When it removes a station it also
1445 * flushes the AMPDU session. So by now, all the AMPDU sessions
1446 * of all the stations of this vif are closed, and the queues
1447 * of these AMPDU sessions are properly closed.
1448 * We still need to take care of the shared queues of the vif.
1449 * Flush them here.
1450 */
1451 mutex_lock(&mvm->mutex);
1452 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1453 mutex_unlock(&mvm->mutex);
1454
1455 /*
1456 * There are transports that buffer a few frames in the host.
1457 * For these, the flush above isn't enough since while we were
1458 * flushing, the transport might have sent more frames to the
1459 * device. To solve this, wait here until the transport is
1460 * empty. Technically, this could have replaced the flush
1461 * above, but flush is much faster than draining. So flush
1462 * first, and drain to make sure we have no frames in the
1463 * transport anymore.
1464 * If a station still had frames on the shared queues, it is
1465 * already marked as draining, so to complete the draining, we
1466 * just need to wait until the transport is empty.
1467 */
1468 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1469 }
1470
1471 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1472 /*
1473 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1474 * We assume here that all the packets sent to the OFFCHANNEL
1475 * queue are sent in ROC session.
1476 */
1477 flush_work(&mvm->roc_done_wk);
1478 } else {
1479 /*
1480 * By now, all the AC queues are empty. The AGG queues are
1481 * empty too. We already got all the Tx responses for all the
1482 * packets in the queues. The drain work can have been
1483 * triggered. Flush it.
1484 */
1485 flush_work(&mvm->sta_drained_wk);
1486 }
1487 }
1488
1489 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1490 struct ieee80211_vif *vif)
1491 {
1492 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1493 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1494
1495 iwl_mvm_prepare_mac_removal(mvm, vif);
1496
1497 mutex_lock(&mvm->mutex);
1498
1499 if (mvm->bf_allowed_vif == mvmvif) {
1500 mvm->bf_allowed_vif = NULL;
1501 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1502 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1503 }
1504
1505 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1506
1507 /*
1508 * For an AP/GO interface, the teardown of the resources allocated to
1509 * the interface is handled as part of the stop_ap flow.
1510 */
1511 if (vif->type == NL80211_IFTYPE_AP ||
1512 vif->type == NL80211_IFTYPE_ADHOC) {
1513 #ifdef CONFIG_NL80211_TESTMODE
1514 if (vif == mvm->noa_vif) {
1515 mvm->noa_vif = NULL;
1516 mvm->noa_duration = 0;
1517 }
1518 #endif
1519 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1520 goto out_release;
1521 }
1522
1523 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1524 mvm->p2p_device_vif = NULL;
1525 iwl_mvm_rm_bcast_sta(mvm, vif);
1526 iwl_mvm_binding_remove_vif(mvm, vif);
1527 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1528 mvmvif->phy_ctxt = NULL;
1529 }
1530
1531 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1532 mvm->vif_count--;
1533
1534 iwl_mvm_power_update_mac(mvm);
1535 iwl_mvm_mac_ctxt_remove(mvm, vif);
1536
1537 out_release:
1538 iwl_mvm_mac_ctxt_release(mvm, vif);
1539 mutex_unlock(&mvm->mutex);
1540 }
1541
1542 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1543 {
1544 return 0;
1545 }
1546
1547 struct iwl_mvm_mc_iter_data {
1548 struct iwl_mvm *mvm;
1549 int port_id;
1550 };
1551
1552 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1553 struct ieee80211_vif *vif)
1554 {
1555 struct iwl_mvm_mc_iter_data *data = _data;
1556 struct iwl_mvm *mvm = data->mvm;
1557 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1558 int ret, len;
1559
1560 /* if we don't have free ports, mcast frames will be dropped */
1561 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1562 return;
1563
1564 if (vif->type != NL80211_IFTYPE_STATION ||
1565 !vif->bss_conf.assoc)
1566 return;
1567
1568 cmd->port_id = data->port_id++;
1569 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1570 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1571
1572 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1573 if (ret)
1574 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1575 }
1576
1577 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1578 {
1579 struct iwl_mvm_mc_iter_data iter_data = {
1580 .mvm = mvm,
1581 };
1582
1583 lockdep_assert_held(&mvm->mutex);
1584
1585 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1586 return;
1587
1588 ieee80211_iterate_active_interfaces_atomic(
1589 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1590 iwl_mvm_mc_iface_iterator, &iter_data);
1591 }
1592
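/*
 * prepare_multicast() may run in atomic context (hence GFP_ATOMIC); the
 * filter command built here is handed to iwl_mvm_configure_filter() through
 * the opaque u64 'multicast' argument, where it replaces any previous one.
 */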
1593 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1594 struct netdev_hw_addr_list *mc_list)
1595 {
1596 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1597 struct iwl_mcast_filter_cmd *cmd;
1598 struct netdev_hw_addr *addr;
1599 int addr_count;
1600 bool pass_all;
1601 int len;
1602
1603 addr_count = netdev_hw_addr_list_count(mc_list);
1604 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1605 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1606 if (pass_all)
1607 addr_count = 0;
1608
1609 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1610 cmd = kzalloc(len, GFP_ATOMIC);
1611 if (!cmd)
1612 return 0;
1613
1614 if (pass_all) {
1615 cmd->pass_all = 1;
1616 return (u64)(unsigned long)cmd;
1617 }
1618
1619 netdev_hw_addr_list_for_each(addr, mc_list) {
1620 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1621 cmd->count, addr->addr);
1622 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1623 addr->addr, ETH_ALEN);
1624 cmd->count++;
1625 }
1626
1627 return (u64)(unsigned long)cmd;
1628 }
1629
1630 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1631 unsigned int changed_flags,
1632 unsigned int *total_flags,
1633 u64 multicast)
1634 {
1635 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1636 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1637
1638 mutex_lock(&mvm->mutex);
1639
1640 /* replace previous configuration */
1641 kfree(mvm->mcast_filter_cmd);
1642 mvm->mcast_filter_cmd = cmd;
1643
1644 if (!cmd)
1645 goto out;
1646
1647 iwl_mvm_recalc_multicast(mvm);
1648 out:
1649 mutex_unlock(&mvm->mutex);
1650 *total_flags = 0;
1651 }
1652
1653 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1654 struct ieee80211_vif *vif,
1655 unsigned int filter_flags,
1656 unsigned int changed_flags)
1657 {
1658 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1659
1660 /* We support only filter for probe requests */
1661 if (!(changed_flags & FIF_PROBE_REQ))
1662 return;
1663
1664 /* Supported only for p2p client interfaces */
1665 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1666 !vif->p2p)
1667 return;
1668
1669 mutex_lock(&mvm->mutex);
1670 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1671 mutex_unlock(&mvm->mutex);
1672 }
1673
1674 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
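/*
 * Broadcast filtering: for every associated (non-P2P) station interface a
 * per-MAC entry of the BCAST_FILTER_CMD is filled from the static filter
 * templates above, with the "magic" IP/MAC attributes resolved per vif.
 */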
1675 struct iwl_bcast_iter_data {
1676 struct iwl_mvm *mvm;
1677 struct iwl_bcast_filter_cmd *cmd;
1678 u8 current_filter;
1679 };
1680
1681 static void
1682 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1683 const struct iwl_fw_bcast_filter *in_filter,
1684 struct iwl_fw_bcast_filter *out_filter)
1685 {
1686 struct iwl_fw_bcast_filter_attr *attr;
1687 int i;
1688
1689 memcpy(out_filter, in_filter, sizeof(*out_filter));
1690
1691 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1692 attr = &out_filter->attrs[i];
1693
1694 if (!attr->mask)
1695 break;
1696
1697 switch (attr->reserved1) {
1698 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1699 if (vif->bss_conf.arp_addr_cnt != 1) {
1700 attr->mask = 0;
1701 continue;
1702 }
1703
1704 attr->val = vif->bss_conf.arp_addr_list[0];
1705 break;
1706 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1707 attr->val = *(__be32 *)&vif->addr[2];
1708 break;
1709 default:
1710 break;
1711 }
1712 attr->reserved1 = 0;
1713 out_filter->num_attrs++;
1714 }
1715 }
1716
1717 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1718 struct ieee80211_vif *vif)
1719 {
1720 struct iwl_bcast_iter_data *data = _data;
1721 struct iwl_mvm *mvm = data->mvm;
1722 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1723 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1724 struct iwl_fw_bcast_mac *bcast_mac;
1725 int i;
1726
1727 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1728 return;
1729
1730 bcast_mac = &cmd->macs[mvmvif->id];
1731
1732 /*
1733 * enable filtering only for associated stations, but not for P2P
1734 * Clients
1735 */
1736 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1737 !vif->bss_conf.assoc)
1738 return;
1739
1740 bcast_mac->default_discard = 1;
1741
1742 /* copy all configured filters */
1743 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1744 /*
1745 * Make sure we don't exceed our filters limit.
1746 * If there is still a valid filter to be configured,
1747 * be on the safe side and just allow bcast for this MAC.
1748 */
1749 if (WARN_ON_ONCE(data->current_filter >=
1750 ARRAY_SIZE(cmd->filters))) {
1751 bcast_mac->default_discard = 0;
1752 bcast_mac->attached_filters = 0;
1753 break;
1754 }
1755
1756 iwl_mvm_set_bcast_filter(vif,
1757 &mvm->bcast_filters[i],
1758 &cmd->filters[data->current_filter]);
1759
1760 /* skip current filter if it contains no attributes */
1761 if (!cmd->filters[data->current_filter].num_attrs)
1762 continue;
1763
1764 /* attach the filter to current mac */
1765 bcast_mac->attached_filters |=
1766 cpu_to_le16(BIT(data->current_filter));
1767
1768 data->current_filter++;
1769 }
1770 }
1771
1772 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1773 struct iwl_bcast_filter_cmd *cmd)
1774 {
1775 struct iwl_bcast_iter_data iter_data = {
1776 .mvm = mvm,
1777 .cmd = cmd,
1778 };
1779
1780 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1781 return false;
1782
1783 memset(cmd, 0, sizeof(*cmd));
1784 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1785 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1786
1787 #ifdef CONFIG_IWLWIFI_DEBUGFS
1788 /* use debugfs filters/macs if override is configured */
1789 if (mvm->dbgfs_bcast_filtering.override) {
1790 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1791 sizeof(cmd->filters));
1792 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1793 sizeof(cmd->macs));
1794 return true;
1795 }
1796 #endif
1797
1798 /* if no filters are configured, do nothing */
1799 if (!mvm->bcast_filters)
1800 return false;
1801
1802 /* configure and attach these filters for each associated sta vif */
1803 ieee80211_iterate_active_interfaces(
1804 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1805 iwl_mvm_bcast_filter_iterator, &iter_data);
1806
1807 return true;
1808 }
1809
1810 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1811 {
1812 struct iwl_bcast_filter_cmd cmd;
1813
1814 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1815 return 0;
1816
1817 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1818 return 0;
1819
1820 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1821 sizeof(cmd), &cmd);
1822 }
1823 #else
1824 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1825 {
1826 return 0;
1827 }
1828 #endif
1829
1830 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1831 struct ieee80211_vif *vif)
1832 {
1833 struct iwl_mu_group_mgmt_cmd cmd = {};
1834
1835 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1836 WLAN_MEMBERSHIP_LEN);
1837 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1838 WLAN_USER_POSITION_LEN);
1839
1840 return iwl_mvm_send_cmd_pdu(mvm,
1841 WIDE_ID(DATA_PATH_GROUP,
1842 UPDATE_MU_GROUPS_CMD),
1843 0, sizeof(cmd), &cmd);
1844 }
1845
1846 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1847 struct ieee80211_vif *vif)
1848 {
1849 if (vif->mu_mimo_owner) {
1850 struct iwl_mu_group_mgmt_notif *notif = _data;
1851
1852 /*
1853 * MU-MIMO Group Id action frame is little endian. We treat
1854 * the data received from firmware as if it came from the
1855 * action frame, so no conversion is needed.
1856 */
1857 ieee80211_update_mu_groups(vif,
1858 (u8 *)&notif->membership_status,
1859 (u8 *)&notif->user_position);
1860 }
1861 }
1862
1863 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1864 struct iwl_rx_cmd_buffer *rxb)
1865 {
1866 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1867 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1868
1869 ieee80211_iterate_active_interfaces_atomic(
1870 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1871 iwl_mvm_mu_mimo_iface_iterator, notif);
1872 }
1873
1874 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1875 struct ieee80211_vif *vif,
1876 struct ieee80211_bss_conf *bss_conf,
1877 u32 changes)
1878 {
1879 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1880 int ret;
1881
1882 /*
1883 * Re-calculate the tsf id, as the master-slave relations depend on the
1884 * beacon interval, which was not known when the station interface was
1885 * added.
1886 */
1887 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1888 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1889
1890 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1891 mvmvif->lqm_active)
1892 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1893 0, 0);
1894
1895 /*
1896 * If we're not associated yet, take the (new) BSSID before associating
1897 * so the firmware knows. If we're already associated, then use the old
1898 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
1899 * branch for disassociation below.
1900 */
1901 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
1902 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1903
1904 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
1905 if (ret)
1906 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1907
1908 /* after sending it once, adopt mac80211 data */
1909 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1910 mvmvif->associated = bss_conf->assoc;
1911
1912 if (changes & BSS_CHANGED_ASSOC) {
1913 if (bss_conf->assoc) {
1914 /* clear statistics to get clean beacon counter */
1915 iwl_mvm_request_statistics(mvm, true);
1916 memset(&mvmvif->beacon_stats, 0,
1917 sizeof(mvmvif->beacon_stats));
1918
1919 /* add quota for this interface */
1920 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1921 if (ret) {
1922 IWL_ERR(mvm, "failed to update quotas\n");
1923 return;
1924 }
1925
1926 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1927 &mvm->status)) {
1928 /*
1929 * If we're restarting then the firmware will
1930 * obviously have lost synchronisation with
1931 * the AP. It will attempt to synchronise by
1932 * itself, but we can make it more reliable by
1933 * scheduling a session protection time event.
1934 *
1935 * The firmware needs to receive a beacon to
1936 * catch up with synchronisation; use 110% of
1937 * the beacon interval.
1938 *
1939 * Set a large maximum delay to allow for more
1940 * than a single interface.
1941 */
1942 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
1943 iwl_mvm_protect_session(mvm, vif, dur, dur,
1944 5 * dur, false);
1945 }
1946
1947 iwl_mvm_sf_update(mvm, vif, false);
1948 iwl_mvm_power_vif_assoc(mvm, vif);
1949 if (vif->p2p) {
1950 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
1951 iwl_mvm_update_smps(mvm, vif,
1952 IWL_MVM_SMPS_REQ_PROT,
1953 IEEE80211_SMPS_DYNAMIC);
1954 }
1955 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1956 /*
1957 * If update fails - SF might be running in associated
1958 * mode while disassociated - which is forbidden.
1959 */
1960 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
1961 "Failed to update SF upon disassociation\n");
1962
1963 /* remove AP station now that the MAC is unassoc */
1964 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
1965 if (ret)
1966 IWL_ERR(mvm, "failed to remove AP station\n");
1967
1968 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1969 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1970 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1971 /* remove quota for this interface */
1972 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1973 if (ret)
1974 IWL_ERR(mvm, "failed to update quotas\n");
1975
1976 if (vif->p2p)
1977 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
1978
1979 /* this will take the cleared BSSID from bss_conf */
1980 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1981 if (ret)
1982 IWL_ERR(mvm,
1983 "failed to update MAC %pM (clear after unassoc)\n",
1984 vif->addr);
1985 }
1986
1987 /*
1988 * The firmware tracks the MU-MIMO group on its own.
1989 * However, on HW restart we should restore this data.
1990 */
1991 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1992 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1993 ret = iwl_mvm_update_mu_groups(mvm, vif);
1994 if (ret)
1995 IWL_ERR(mvm,
1996 "failed to update VHT MU_MIMO groups\n");
1997 }
1998
1999 iwl_mvm_recalc_multicast(mvm);
2000 iwl_mvm_configure_bcast_filter(mvm);
2001
2002 /* reset rssi values */
2003 mvmvif->bf_data.ave_beacon_signal = 0;
2004
2005 iwl_mvm_bt_coex_vif_change(mvm);
2006 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2007 IEEE80211_SMPS_AUTOMATIC);
2008 if (fw_has_capa(&mvm->fw->ucode_capa,
2009 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2010 iwl_mvm_config_scan(mvm);
2011 }
2012
2013 if (changes & BSS_CHANGED_BEACON_INFO) {
2014 /*
2015 * We received a beacon from the associated AP so
2016 * remove the session protection.
2017 */
2018 iwl_mvm_remove_time_event(mvm, mvmvif,
2019 &mvmvif->time_event_data);
2020
2021 iwl_mvm_sf_update(mvm, vif, false);
2022 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2023 }
2024
2025 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2026 /*
2027 * Send power command on every beacon change,
2028 * because we may not have enabled beacon abort yet.
2029 */
2030 BSS_CHANGED_BEACON_INFO)) {
2031 ret = iwl_mvm_power_update_mac(mvm);
2032 if (ret)
2033 IWL_ERR(mvm, "failed to update power mode\n");
2034 }
2035
2036 if (changes & BSS_CHANGED_TXPOWER) {
2037 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2038 bss_conf->txpower);
2039 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2040 }
2041
2042 if (changes & BSS_CHANGED_CQM) {
2043 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2044 /* reset cqm events tracking */
2045 mvmvif->bf_data.last_cqm_event = 0;
2046 if (mvmvif->bf_data.bf_enabled) {
2047 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2048 if (ret)
2049 IWL_ERR(mvm,
2050 "failed to update CQM thresholds\n");
2051 }
2052 }
2053
2054 if (changes & BSS_CHANGED_ARP_FILTER) {
2055 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2056 iwl_mvm_configure_bcast_filter(mvm);
2057 }
2058 }
2059
2060 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2061 struct ieee80211_vif *vif)
2062 {
2063 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2064 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2065 int ret;
2066
2067 /*
2068 * iwl_mvm_mac_ctxt_add() might read directly from the device
2069 * (the system time), so make sure it is available.
2070 */
2071 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2072 if (ret)
2073 return ret;
2074
2075 mutex_lock(&mvm->mutex);
2076
2077 /* Send the beacon template */
2078 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2079 if (ret)
2080 goto out_unlock;
2081
2082 /*
2083 * Re-calculate the tsf id, as the master-slave relations depend on the
2084 * beacon interval, which was not known when the AP interface was added.
2085 */
2086 if (vif->type == NL80211_IFTYPE_AP)
2087 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2088
2089 mvmvif->ap_assoc_sta_count = 0;
2090
2091 /* Add the mac context */
2092 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2093 if (ret)
2094 goto out_unlock;
2095
2096 /* Perform the binding */
2097 ret = iwl_mvm_binding_add_vif(mvm, vif);
2098 if (ret)
2099 goto out_remove;
2100
2101 /* Send the bcast station. At this stage the TBTT and DTIM time events
2102 * are added and applied to the scheduler */
2103 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2104 if (ret)
2105 goto out_unbind;
2106
2107 /* must be set before quota calculations */
2108 mvmvif->ap_ibss_active = true;
2109
2110 /* power update needs to be done before quotas */
2111 iwl_mvm_power_update_mac(mvm);
2112
2113 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2114 if (ret)
2115 goto out_quota_failed;
2116
2117 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2118 if (vif->p2p && mvm->p2p_device_vif)
2119 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2120
2121 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2122
2123 iwl_mvm_bt_coex_vif_change(mvm);
2124
2125 /* we don't support TDLS during DCM */
2126 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2127 iwl_mvm_teardown_tdls_peers(mvm);
2128
2129 goto out_unlock;
2130
2131 out_quota_failed:
2132 iwl_mvm_power_update_mac(mvm);
2133 mvmvif->ap_ibss_active = false;
2134 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2135 out_unbind:
2136 iwl_mvm_binding_remove_vif(mvm, vif);
2137 out_remove:
2138 iwl_mvm_mac_ctxt_remove(mvm, vif);
2139 out_unlock:
2140 mutex_unlock(&mvm->mutex);
2141 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2142 return ret;
2143 }
2144
2145 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2146 struct ieee80211_vif *vif)
2147 {
2148 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2149 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2150
2151 iwl_mvm_prepare_mac_removal(mvm, vif);
2152
2153 mutex_lock(&mvm->mutex);
2154
2155 /* Handle AP stop while in CSA */
2156 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2157 iwl_mvm_remove_time_event(mvm, mvmvif,
2158 &mvmvif->time_event_data);
2159 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2160 mvmvif->csa_countdown = false;
2161 }
2162
2163 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2164 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2165 mvm->csa_tx_block_bcn_timeout = 0;
2166 }
2167
2168 mvmvif->ap_ibss_active = false;
2169 mvm->ap_last_beacon_gp2 = 0;
2170
2171 iwl_mvm_bt_coex_vif_change(mvm);
2172
2173 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2174
2175 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2176 if (vif->p2p && mvm->p2p_device_vif)
2177 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2178
2179 iwl_mvm_update_quotas(mvm, false, NULL);
2180 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2181 iwl_mvm_binding_remove_vif(mvm, vif);
2182
2183 iwl_mvm_power_update_mac(mvm);
2184
2185 iwl_mvm_mac_ctxt_remove(mvm, vif);
2186
2187 mutex_unlock(&mvm->mutex);
2188 }
2189
2190 static void
2191 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2192 struct ieee80211_vif *vif,
2193 struct ieee80211_bss_conf *bss_conf,
2194 u32 changes)
2195 {
2196 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2197
2198 /* Changes will be applied when the AP/IBSS is started */
2199 if (!mvmvif->ap_ibss_active)
2200 return;
2201
2202 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2203 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2204 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2205 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2206
2207 /* Need to send a new beacon template to the FW */
2208 if (changes & BSS_CHANGED_BEACON &&
2209 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2210 IWL_WARN(mvm, "Failed updating beacon data\n");
2211
2212 if (changes & BSS_CHANGED_TXPOWER) {
2213 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2214 bss_conf->txpower);
2215 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2216 }
2217 }
2218
2219 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2220 struct ieee80211_vif *vif,
2221 struct ieee80211_bss_conf *bss_conf,
2222 u32 changes)
2223 {
2224 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2225
2226 /*
2227 * iwl_mvm_bss_info_changed_station() might call
2228 * iwl_mvm_protect_session(), which reads directly from
2229 * the device (the system time), so make sure it is available.
2230 */
2231 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2232 return;
2233
2234 mutex_lock(&mvm->mutex);
2235
2236 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2237 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2238
2239 switch (vif->type) {
2240 case NL80211_IFTYPE_STATION:
2241 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2242 break;
2243 case NL80211_IFTYPE_AP:
2244 case NL80211_IFTYPE_ADHOC:
2245 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2246 break;
2247 case NL80211_IFTYPE_MONITOR:
2248 if (changes & BSS_CHANGED_MU_GROUPS)
2249 iwl_mvm_update_mu_groups(mvm, vif);
2250 break;
2251 default:
2252 /* shouldn't happen */
2253 WARN_ON_ONCE(1);
2254 }
2255
2256 mutex_unlock(&mvm->mutex);
2257 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2258 }
2259
2260 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2261 struct ieee80211_vif *vif,
2262 struct ieee80211_scan_request *hw_req)
2263 {
2264 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2265 int ret;
2266
2267 if (hw_req->req.n_channels == 0 ||
2268 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2269 return -EINVAL;
2270
2271 mutex_lock(&mvm->mutex);
2272 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2273 mutex_unlock(&mvm->mutex);
2274
2275 return ret;
2276 }
2277
2278 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2279 struct ieee80211_vif *vif)
2280 {
2281 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2282
2283 mutex_lock(&mvm->mutex);
2284
2285 /* Due to a race condition, it's possible that mac80211 asks
2286 * us to stop a hw_scan when it's already stopped. This can
2287 * happen, for instance, if we stopped the scan ourselves,
2288 * called ieee80211_scan_completed() and userspace cancelled the
2289 * scan before ieee80211_scan_work() could run.
2290 * To handle that, simply return if the scan is not running.
2291 */
2292 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2293 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2294
2295 mutex_unlock(&mvm->mutex);
2296 }
2297
2298 static void
2299 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2300 struct ieee80211_sta *sta, u16 tids,
2301 int num_frames,
2302 enum ieee80211_frame_release_type reason,
2303 bool more_data)
2304 {
2305 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2306
2307 /* Called when we need to transmit (a) frame(s) from mac80211 */
2308
2309 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2310 tids, more_data, false);
2311 }
2312
2313 static void
2314 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2315 struct ieee80211_sta *sta, u16 tids,
2316 int num_frames,
2317 enum ieee80211_frame_release_type reason,
2318 bool more_data)
2319 {
2320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2321
2322 /* Called when we need to transmit (a) frame(s) from agg queue */
2323
2324 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2325 tids, more_data, true);
2326 }
2327
2328 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2329 enum sta_notify_cmd cmd,
2330 struct ieee80211_sta *sta)
2331 {
2332 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2333 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2334 unsigned long txqs = 0, tids = 0;
2335 int tid;
2336
2337 spin_lock_bh(&mvmsta->lock);
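/*
 * Collect the TX queues used by this station's aggregation sessions,
 * and the TIDs that still have frames queued, so the queue timers can
 * be frozen/unfrozen and buffered TIDs marked in the switch below.
 */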
2338 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2339 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2340
2341 if (tid_data->state != IWL_AGG_ON &&
2342 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2343 continue;
2344
2345 __set_bit(tid_data->txq_id, &txqs);
2346
2347 if (iwl_mvm_tid_queued(tid_data) == 0)
2348 continue;
2349
2350 __set_bit(tid, &tids);
2351 }
2352
2353 switch (cmd) {
2354 case STA_NOTIFY_SLEEP:
2355 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2356 ieee80211_sta_block_awake(hw, sta, true);
2357
2358 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2359 ieee80211_sta_set_buffered(sta, tid, true);
2360
2361 if (txqs)
2362 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2363 /*
2364 * The fw updates the STA to be asleep. Tx packets on the Tx
2365 * queues to this station will not be transmitted. The fw will
2366 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2367 */
2368 break;
2369 case STA_NOTIFY_AWAKE:
2370 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2371 break;
2372
2373 if (txqs)
2374 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2375 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2376 break;
2377 default:
2378 break;
2379 }
2380 spin_unlock_bh(&mvmsta->lock);
2381 }
2382
2383 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2384 struct ieee80211_vif *vif,
2385 enum sta_notify_cmd cmd,
2386 struct ieee80211_sta *sta)
2387 {
2388 __iwl_mvm_mac_sta_notify(hw, cmd, sta);
2389 }
2390
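/*
 * Handle a power-save state notification from the firmware for a client of
 * one of our AP interfaces: mirror the sleep/awake transition into mac80211
 * and forward U-APSD trigger and PS-Poll events.
 */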
2391 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2392 {
2393 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2394 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
2395 struct ieee80211_sta *sta;
2396 struct iwl_mvm_sta *mvmsta;
2397 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
2398
2399 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
2400 return;
2401
2402 rcu_read_lock();
2403 sta = mvm->fw_id_to_mac_id[notif->sta_id];
2404 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2405 rcu_read_unlock();
2406 return;
2407 }
2408
2409 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2410
2411 if (!mvmsta->vif ||
2412 mvmsta->vif->type != NL80211_IFTYPE_AP) {
2413 rcu_read_unlock();
2414 return;
2415 }
2416
2417 if (mvmsta->sleeping != sleeping) {
2418 mvmsta->sleeping = sleeping;
2419 __iwl_mvm_mac_sta_notify(mvm->hw,
2420 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
2421 sta);
2422 ieee80211_sta_ps_transition(sta, sleeping);
2423 }
2424
2425 if (sleeping) {
2426 switch (notif->type) {
2427 case IWL_MVM_PM_EVENT_AWAKE:
2428 case IWL_MVM_PM_EVENT_ASLEEP:
2429 break;
2430 case IWL_MVM_PM_EVENT_UAPSD:
2431 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
2432 break;
2433 case IWL_MVM_PM_EVENT_PS_POLL:
2434 ieee80211_sta_pspoll(sta);
2435 break;
2436 default:
2437 break;
2438 }
2439 }
2440
2441 rcu_read_unlock();
2442 }
2443
2444 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2445 struct ieee80211_vif *vif,
2446 struct ieee80211_sta *sta)
2447 {
2448 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2449 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2450
2451 /*
2452 * This is called before mac80211 does RCU synchronisation,
2453 * so here we already invalidate our internal RCU-protected
2454 * station pointer. The rest of the code will thus no longer
2455 * be able to find the station this way, and we don't rely
2456 * on further RCU synchronisation after the sta_state()
2457 * callback deleted the station.
2458 */
2459 mutex_lock(&mvm->mutex);
2460 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2461 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2462 ERR_PTR(-ENOENT));
2463
2464 mutex_unlock(&mvm->mutex);
2465 }
2466
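/*
 * Decide whether U-APSD may be used on this interface, based on firmware
 * support, the P2P vs. BSS role and the uapsd_disable module parameter.
 */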
2467 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2468 const u8 *bssid)
2469 {
2470 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2471 return;
2472
2473 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2474 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2475 return;
2476 }
2477
2478 if (!vif->p2p &&
2479 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2480 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2481 return;
2482 }
2483
2484 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2485 }
2486
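/*
 * Collect firmware debug data when a TDLS event matches the configured
 * debug trigger (action bitmap and, optionally, a specific peer address).
 */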
2487 static void
2488 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2489 struct ieee80211_vif *vif, u8 *peer_addr,
2490 enum nl80211_tdls_operation action)
2491 {
2492 struct iwl_fw_dbg_trigger_tlv *trig;
2493 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2494
2495 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2496 return;
2497
2498 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2499 tdls_trig = (void *)trig->data;
2500 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
2501 return;
2502
2503 if (!(tdls_trig->action_bitmap & BIT(action)))
2504 return;
2505
2506 if (tdls_trig->peer_mode &&
2507 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2508 return;
2509
2510 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
2511 "TDLS event occurred, peer %pM, action %d",
2512 peer_addr, action);
2513 }
2514
2515 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2516 struct iwl_mvm_sta *mvm_sta)
2517 {
2518 struct iwl_mvm_tid_data *tid_data;
2519 struct sk_buff *skb;
2520 int i;
2521
2522 spin_lock_bh(&mvm_sta->lock);
2523 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2524 tid_data = &mvm_sta->tid_data[i];
2525 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2526 ieee80211_free_txskb(mvm->hw, skb);
2527 }
2528 spin_unlock_bh(&mvm_sta->lock);
2529 }
2530
2531 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2532 struct ieee80211_vif *vif,
2533 struct ieee80211_sta *sta,
2534 enum ieee80211_sta_state old_state,
2535 enum ieee80211_sta_state new_state)
2536 {
2537 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2538 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2539 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2540 int ret;
2541
2542 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2543 sta->addr, old_state, new_state);
2544
2545 /* this would be a mac80211 bug ... but don't crash */
2546 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2547 return -EINVAL;
2548
2549 /* if a STA is being removed, reuse its ID */
2550 flush_work(&mvm->sta_drained_wk);
2551
2552 /*
2553 * If we are in a STA removal flow and in DQA mode:
2554 *
2555 * This is after the sync_rcu part, so the queues have already been
2556 * flushed. No more TXs on their way in mac80211's path, and no more in
2557 * the queues.
2558 * Also, we won't be getting any new TX frames for this station.
2559 * What we might have are deferred TX frames that need to be taken care
2560 * of.
2561 *
2562 * Drop any still-queued deferred-frame before removing the STA, and
2563 * make sure the worker is no longer handling frames for this STA.
2564 */
2565 if (old_state == IEEE80211_STA_NONE &&
2566 new_state == IEEE80211_STA_NOTEXIST &&
2567 iwl_mvm_is_dqa_supported(mvm)) {
2568 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2569 flush_work(&mvm->add_stream_wk);
2570
2571 /*
2572 * No need to make sure deferred TX indication is off since the
2573 * worker will already remove it if it was on
2574 */
2575 }
2576
2577 mutex_lock(&mvm->mutex);
2578 /* track whether or not the station is associated */
2579 mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC;
2580
2581 if (old_state == IEEE80211_STA_NOTEXIST &&
2582 new_state == IEEE80211_STA_NONE) {
2583 /*
2584 * Firmware bug - it'll crash if the beacon interval is less
2585 * than 16. We can't avoid connecting at all, so refuse the
2586 * station state change; this will cause mac80211 to abandon
2587 * attempts to connect to this AP, and eventually wpa_s will
2588 * blacklist the AP...
2589 */
2590 if (vif->type == NL80211_IFTYPE_STATION &&
2591 vif->bss_conf.beacon_int < 16) {
2592 IWL_ERR(mvm,
2593 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2594 sta->addr, vif->bss_conf.beacon_int);
2595 ret = -EINVAL;
2596 goto out_unlock;
2597 }
2598
2599 if (sta->tdls &&
2600 (vif->p2p ||
2601 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2602 IWL_MVM_TDLS_STA_COUNT ||
2603 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2604 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2605 ret = -EBUSY;
2606 goto out_unlock;
2607 }
2608
2609 ret = iwl_mvm_add_sta(mvm, vif, sta);
2610 if (sta->tdls && ret == 0) {
2611 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2612 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2613 NL80211_TDLS_SETUP);
2614 }
2615 } else if (old_state == IEEE80211_STA_NONE &&
2616 new_state == IEEE80211_STA_AUTH) {
2617 /*
2618 * EBS may be disabled due to previous failures reported by FW.
2619 * Reset EBS status here assuming environment has been changed.
2620 */
2621 mvm->last_ebs_successful = true;
2622 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2623 ret = 0;
2624 } else if (old_state == IEEE80211_STA_AUTH &&
2625 new_state == IEEE80211_STA_ASSOC) {
2626 if (vif->type == NL80211_IFTYPE_AP) {
2627 mvmvif->ap_assoc_sta_count++;
2628 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2629 }
2630
2631 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2632 true);
2633 ret = iwl_mvm_update_sta(mvm, vif, sta);
2634 } else if (old_state == IEEE80211_STA_ASSOC &&
2635 new_state == IEEE80211_STA_AUTHORIZED) {
2636
2637 /* we don't support TDLS during DCM */
2638 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2639 iwl_mvm_teardown_tdls_peers(mvm);
2640
2641 if (sta->tdls)
2642 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2643 NL80211_TDLS_ENABLE_LINK);
2644
2645 /* enable beacon filtering */
2646 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2647 ret = 0;
2648 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2649 new_state == IEEE80211_STA_ASSOC) {
2650 /* disable beacon filtering */
2651 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2652 ret = 0;
2653 } else if (old_state == IEEE80211_STA_ASSOC &&
2654 new_state == IEEE80211_STA_AUTH) {
2655 if (vif->type == NL80211_IFTYPE_AP) {
2656 mvmvif->ap_assoc_sta_count--;
2657 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2658 }
2659 ret = 0;
2660 } else if (old_state == IEEE80211_STA_AUTH &&
2661 new_state == IEEE80211_STA_NONE) {
2662 ret = 0;
2663 } else if (old_state == IEEE80211_STA_NONE &&
2664 new_state == IEEE80211_STA_NOTEXIST) {
2665 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2666 if (sta->tdls) {
2667 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2668 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2669 NL80211_TDLS_DISABLE_LINK);
2670 }
2671 } else {
2672 ret = -EIO;
2673 }
2674 out_unlock:
2675 mutex_unlock(&mvm->mutex);
2676
2677 if (sta->tdls && ret == 0) {
2678 if (old_state == IEEE80211_STA_NOTEXIST &&
2679 new_state == IEEE80211_STA_NONE)
2680 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2681 else if (old_state == IEEE80211_STA_NONE &&
2682 new_state == IEEE80211_STA_NOTEXIST)
2683 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2684 }
2685
2686 return ret;
2687 }
2688
2689 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2690 {
2691 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2692
2693 mvm->rts_threshold = value;
2694
2695 return 0;
2696 }
2697
2698 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2699 struct ieee80211_vif *vif,
2700 struct ieee80211_sta *sta, u32 changed)
2701 {
2702 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2703
2704 if (vif->type == NL80211_IFTYPE_STATION &&
2705 changed & IEEE80211_RC_NSS_CHANGED)
2706 iwl_mvm_sf_update(mvm, vif, false);
2707 }
2708
2709 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2710 struct ieee80211_vif *vif, u16 ac,
2711 const struct ieee80211_tx_queue_params *params)
2712 {
2713 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2714 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2715
2716 mvmvif->queue_params[ac] = *params;
2717
2718 /*
2719 * No need to update right away, we'll get BSS_CHANGED_QOS.
2720 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2721 */
2722 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2723 int ret;
2724
2725 mutex_lock(&mvm->mutex);
2726 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2727 mutex_unlock(&mvm->mutex);
2728 return ret;
2729 }
2730 return 0;
2731 }
2732
2733 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2734 struct ieee80211_vif *vif)
2735 {
2736 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2737 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
2738 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
2739
2740 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2741 return;
2742
2743 /*
2744 * iwl_mvm_protect_session() reads directly from the device
2745 * (the system time), so make sure it is available.
2746 */
2747 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2748 return;
2749
2750 mutex_lock(&mvm->mutex);
2751 /* Try really hard to protect the session and hear a beacon */
2752 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2753 mutex_unlock(&mvm->mutex);
2754
2755 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2756 }
2757
2758 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2759 struct ieee80211_vif *vif,
2760 struct cfg80211_sched_scan_request *req,
2761 struct ieee80211_scan_ies *ies)
2762 {
2763 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2764
2765 int ret;
2766
2767 mutex_lock(&mvm->mutex);
2768
2769 if (!vif->bss_conf.idle) {
2770 ret = -EBUSY;
2771 goto out;
2772 }
2773
2774 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2775
2776 out:
2777 mutex_unlock(&mvm->mutex);
2778 return ret;
2779 }
2780
2781 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2782 struct ieee80211_vif *vif)
2783 {
2784 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2785 int ret;
2786
2787 mutex_lock(&mvm->mutex);
2788
2789 /* Due to a race condition, it's possible that mac80211 asks
2790 * us to stop a sched_scan when it's already stopped. This
2791 * can happen, for instance, if we stopped the scan ourselves,
2792 * called ieee80211_sched_scan_stopped() and userspace stopped the
2793 * sched scan before ieee80211_sched_scan_stopped_work()
2794 * could run. To handle this, simply return if the scan is
2795 * not running.
2796 */
2797 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2798 mutex_unlock(&mvm->mutex);
2799 return 0;
2800 }
2801
2802 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2803 mutex_unlock(&mvm->mutex);
2804 iwl_mvm_wait_for_async_handlers(mvm);
2805
2806 return ret;
2807 }
2808
2809 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2810 enum set_key_cmd cmd,
2811 struct ieee80211_vif *vif,
2812 struct ieee80211_sta *sta,
2813 struct ieee80211_key_conf *key)
2814 {
2815 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2816 struct iwl_mvm_sta *mvmsta;
2817 struct iwl_mvm_key_pn *ptk_pn;
2818 int keyidx = key->keyidx;
2819 int ret;
2820 u8 key_offset;
2821
2822 if (iwlwifi_mod_params.sw_crypto) {
2823 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2824 return -EOPNOTSUPP;
2825 }
2826
2827 switch (key->cipher) {
2828 case WLAN_CIPHER_SUITE_TKIP:
2829 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2830 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2831 break;
2832 case WLAN_CIPHER_SUITE_CCMP:
2833 case WLAN_CIPHER_SUITE_GCMP:
2834 case WLAN_CIPHER_SUITE_GCMP_256:
2835 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2836 break;
2837 case WLAN_CIPHER_SUITE_AES_CMAC:
2838 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2839 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2840 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2841 break;
2842 case WLAN_CIPHER_SUITE_WEP40:
2843 case WLAN_CIPHER_SUITE_WEP104:
2844 /* For non-client mode, only use WEP keys for TX as we probably
2845 * don't have a station yet anyway and would then have to keep
2846 * track of the keys, linking them to each of the clients/peers
2847 * as they appear. For now, don't do that; for performance, WEP
2848 * offload doesn't really matter much, but we need it for some
2849 * other offload features in client mode.
2850 */
2851 if (vif->type != NL80211_IFTYPE_STATION)
2852 return 0;
2853 break;
2854 default:
2855 /* currently FW supports only one optional cipher scheme */
2856 if (hw->n_cipher_schemes &&
2857 hw->cipher_schemes->cipher == key->cipher)
2858 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2859 else
2860 return -EOPNOTSUPP;
2861 }
2862
2863 mutex_lock(&mvm->mutex);
2864
2865 switch (cmd) {
2866 case SET_KEY:
2867 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2868 vif->type == NL80211_IFTYPE_AP) && !sta) {
2869 /*
2870 * GTK on AP interface is a TX-only key, return 0;
2871 * on IBSS they're per-station and because we're lazy
2872 * we don't support them for RX, so do the same.
2873 * CMAC/GMAC in AP/IBSS modes must be done in software.
2874 */
2875 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
2876 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
2877 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2878 ret = -EOPNOTSUPP;
2879 else
2880 ret = 0;
2881 key->hw_key_idx = STA_KEY_IDX_INVALID;
2882 break;
2883 }
2884
2885 /* During FW restart, in order to restore the state as it was,
2886 * don't try to reprogram keys we previously failed for.
2887 */
2888 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2889 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2890 IWL_DEBUG_MAC80211(mvm,
2891 "skip invalid idx key programming during restart\n");
2892 ret = 0;
2893 break;
2894 }
2895
2896 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2897 sta && iwl_mvm_has_new_rx_api(mvm) &&
2898 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2899 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2900 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2901 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2902 struct ieee80211_key_seq seq;
2903 int tid, q;
2904
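/*
 * With the new RX API, replay (PN) checking for these pairwise ciphers is
 * done in the driver, so allocate per-RX-queue PN state and seed it from
 * mac80211's current RX sequence counters.
 */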
2905 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2906 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
2907 ptk_pn = kzalloc(sizeof(*ptk_pn) +
2908 mvm->trans->num_rx_queues *
2909 sizeof(ptk_pn->q[0]),
2910 GFP_KERNEL);
2911 if (!ptk_pn) {
2912 ret = -ENOMEM;
2913 break;
2914 }
2915
2916 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2917 ieee80211_get_key_rx_seq(key, tid, &seq);
2918 for (q = 0; q < mvm->trans->num_rx_queues; q++)
2919 memcpy(ptk_pn->q[q].pn[tid],
2920 seq.ccmp.pn,
2921 IEEE80211_CCMP_PN_LEN);
2922 }
2923
2924 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
2925 }
2926
2927 /* in HW restart reuse the index, otherwise request a new one */
2928 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2929 key_offset = key->hw_key_idx;
2930 else
2931 key_offset = STA_KEY_IDX_INVALID;
2932
2933 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2934 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
2935 if (ret) {
2936 IWL_WARN(mvm, "set key failed\n");
2937 /*
2938 * can't add key for RX, but we don't need it
2939 * in the device for TX so still return 0
2940 */
2941 key->hw_key_idx = STA_KEY_IDX_INVALID;
2942 ret = 0;
2943 }
2944
2945 break;
2946 case DISABLE_KEY:
2947 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2948 ret = 0;
2949 break;
2950 }
2951
2952 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
2953 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2954 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2955 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2956 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2957 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2958 ptk_pn = rcu_dereference_protected(
2959 mvmsta->ptk_pn[keyidx],
2960 lockdep_is_held(&mvm->mutex));
2961 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
2962 if (ptk_pn)
2963 kfree_rcu(ptk_pn, rcu_head);
2964 }
2965
2966 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2967 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2968 break;
2969 default:
2970 ret = -EINVAL;
2971 }
2972
2973 mutex_unlock(&mvm->mutex);
2974 return ret;
2975 }
2976
2977 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2978 struct ieee80211_vif *vif,
2979 struct ieee80211_key_conf *keyconf,
2980 struct ieee80211_sta *sta,
2981 u32 iv32, u16 *phase1key)
2982 {
2983 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2984
2985 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2986 return;
2987
2988 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2989 }
2990
2991
2992 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2993 struct iwl_rx_packet *pkt, void *data)
2994 {
2995 struct iwl_mvm *mvm =
2996 container_of(notif_wait, struct iwl_mvm, notif_wait);
2997 struct iwl_hs20_roc_res *resp;
2998 int resp_len = iwl_rx_packet_payload_len(pkt);
2999 struct iwl_mvm_time_event_data *te_data = data;
3000
3001 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3002 return true;
3003
3004 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3005 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3006 return true;
3007 }
3008
3009 resp = (void *)pkt->data;
3010
3011 IWL_DEBUG_TE(mvm,
3012 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
3013 resp->status, resp->event_unique_id);
3014
3015 te_data->uid = le32_to_cpu(resp->event_unique_id);
3016 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3017 te_data->uid);
3018
3019 spin_lock_bh(&mvm->time_event_lock);
3020 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3021 spin_unlock_bh(&mvm->time_event_lock);
3022
3023 return true;
3024 }
3025
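/*
 * Timing constants (in TUs) used by iwl_mvm_send_aux_roc_cmd() below to
 * clamp the requested remain-on-channel duration and scheduling delay
 * against the DTIM interval when associated.
 */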
3026 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
3027 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
3028 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
3029 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
3030 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
3031 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3032 struct ieee80211_channel *channel,
3033 struct ieee80211_vif *vif,
3034 int duration)
3035 {
3036 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3037 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3038 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3039 static const u16 time_event_response[] = { HOT_SPOT_CMD };
3040 struct iwl_notification_wait wait_time_event;
3041 u32 dtim_interval = vif->bss_conf.dtim_period *
3042 vif->bss_conf.beacon_int;
3043 u32 req_dur, delay;
3044 struct iwl_hs20_roc_req aux_roc_req = {
3045 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3046 .id_and_color =
3047 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3048 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3049 /* Set the channel info data */
3050 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
3051 PHY_BAND_24 : PHY_BAND_5,
3052 .channel_info.channel = channel->hw_value,
3053 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3054 /* Set the time and duration */
3055 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3056 };
3057
3058 delay = AUX_ROC_MIN_DELAY;
3059 req_dur = MSEC_TO_TU(duration);
3060
3061 /*
3062 * If we are associated we want the delay time to be at least one
3063 * dtim interval so that the FW can wait until after the DTIM and
3064 * then start the time event; this will potentially allow us to
3065 * remain off-channel for the max duration.
3066 * Since we want to use almost a whole dtim interval we would also
3067 * like the delay to be for 2-3 dtim intervals, in case there are
3068 * other time events with higher priority.
3069 */
3070 if (vif->bss_conf.assoc) {
3071 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
3072 /* We cannot remain off-channel longer than the DTIM interval */
3073 if (dtim_interval <= req_dur) {
3074 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
3075 if (req_dur <= AUX_ROC_MIN_DURATION)
3076 req_dur = dtim_interval -
3077 AUX_ROC_MIN_SAFETY_BUFFER;
3078 }
3079 }
3080
3081 aux_roc_req.duration = cpu_to_le32(req_dur);
3082 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
3083
3084 IWL_DEBUG_TE(mvm,
3085 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
3086 channel->hw_value, req_dur, duration, delay,
3087 dtim_interval);
3088 /* Set the node address */
3089 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3090
3091 lockdep_assert_held(&mvm->mutex);
3092
3093 spin_lock_bh(&mvm->time_event_lock);
3094
3095 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3096 spin_unlock_bh(&mvm->time_event_lock);
3097 return -EIO;
3098 }
3099
3100 te_data->vif = vif;
3101 te_data->duration = duration;
3102 te_data->id = HOT_SPOT_CMD;
3103
3104 spin_unlock_bh(&mvm->time_event_lock);
3105
3106 /*
3107 * Use a notification wait, which really just processes the
3108 * command response and doesn't wait for anything, in order
3109 * to be able to process the response and get the UID inside
3110 * the RX path. Using CMD_WANT_SKB doesn't work because it
3111 * stores the buffer and then wakes up this thread, by which
3112 * time another notification (that the time event started)
3113 * might already be processed unsuccessfully.
3114 */
3115 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3116 time_event_response,
3117 ARRAY_SIZE(time_event_response),
3118 iwl_mvm_rx_aux_roc, te_data);
3119
3120 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3121 &aux_roc_req);
3122
3123 if (res) {
3124 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3125 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3126 goto out_clear_te;
3127 }
3128
3129 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3130 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3131 /* should never fail */
3132 WARN_ON_ONCE(res);
3133
3134 if (res) {
3135 out_clear_te:
3136 spin_lock_bh(&mvm->time_event_lock);
3137 iwl_mvm_te_clear_data(mvm, te_data);
3138 spin_unlock_bh(&mvm->time_event_lock);
3139 }
3140
3141 return res;
3142 }
3143
3144 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3145 struct ieee80211_vif *vif,
3146 struct ieee80211_channel *channel,
3147 int duration,
3148 enum ieee80211_roc_type type)
3149 {
3150 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3151 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3152 struct cfg80211_chan_def chandef;
3153 struct iwl_mvm_phy_ctxt *phy_ctxt;
3154 int ret, i;
3155
3156 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3157 duration, type);
3158
3159 flush_work(&mvm->roc_done_wk);
3160
3161 mutex_lock(&mvm->mutex);
3162
3163 switch (vif->type) {
3164 case NL80211_IFTYPE_STATION:
3165 if (fw_has_capa(&mvm->fw->ucode_capa,
3166 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3167 /* Use aux roc framework (HS20) */
3168 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3169 vif, duration);
3170 goto out_unlock;
3171 }
3172 IWL_ERR(mvm, "hotspot not supported\n");
3173 ret = -EINVAL;
3174 goto out_unlock;
3175 case NL80211_IFTYPE_P2P_DEVICE:
3176 /* handle below */
3177 break;
3178 default:
3179 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3180 ret = -EINVAL;
3181 goto out_unlock;
3182 }
3183
3184 for (i = 0; i < NUM_PHY_CTX; i++) {
3185 phy_ctxt = &mvm->phy_ctxts[i];
3186 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3187 continue;
3188
3189 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3190 /*
3191 * Unbind the P2P_DEVICE from the current PHY context,
3192 * and if the PHY context is not used remove it.
3193 */
3194 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3195 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3196 goto out_unlock;
3197
3198 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3199
3200 /* Bind the P2P_DEVICE to the current PHY Context */
3201 mvmvif->phy_ctxt = phy_ctxt;
3202
3203 ret = iwl_mvm_binding_add_vif(mvm, vif);
3204 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3205 goto out_unlock;
3206
3207 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3208 goto schedule_time_event;
3209 }
3210 }
3211
3212 /* Need to update the PHY context only if the ROC channel changed */
3213 if (channel == mvmvif->phy_ctxt->channel)
3214 goto schedule_time_event;
3215
3216 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3217
3218 /*
3219 * Change the PHY context configuration as it is currently referenced
3220 * only by the P2P Device MAC
3221 */
3222 if (mvmvif->phy_ctxt->ref == 1) {
3223 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3224 &chandef, 1, 1);
3225 if (ret)
3226 goto out_unlock;
3227 } else {
3228 /*
3229 * The PHY context is shared with other MACs. Need to remove the
3230 * P2P Device from the binding, allocate a new PHY context and
3231 * create a new binding
3232 */
3233 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3234 if (!phy_ctxt) {
3235 ret = -ENOSPC;
3236 goto out_unlock;
3237 }
3238
3239 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3240 1, 1);
3241 if (ret) {
3242 IWL_ERR(mvm, "Failed to change PHY context\n");
3243 goto out_unlock;
3244 }
3245
3246 /* Unbind the P2P_DEVICE from the current PHY context */
3247 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3248 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3249 goto out_unlock;
3250
3251 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3252
3253 /* Bind the P2P_DEVICE to the new allocated PHY context */
3254 mvmvif->phy_ctxt = phy_ctxt;
3255
3256 ret = iwl_mvm_binding_add_vif(mvm, vif);
3257 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3258 goto out_unlock;
3259
3260 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3261 }
3262
3263 schedule_time_event:
3264 /* Schedule the time events */
3265 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3266
3267 out_unlock:
3268 mutex_unlock(&mvm->mutex);
3269 IWL_DEBUG_MAC80211(mvm, "leave\n");
3270 return ret;
3271 }
3272
3273 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3274 {
3275 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3276
3277 IWL_DEBUG_MAC80211(mvm, "enter\n");
3278
3279 mutex_lock(&mvm->mutex);
3280 iwl_mvm_stop_roc(mvm);
3281 mutex_unlock(&mvm->mutex);
3282
3283 IWL_DEBUG_MAC80211(mvm, "leave\n");
3284 return 0;
3285 }
3286
3287 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3288 struct ieee80211_chanctx_conf *ctx)
3289 {
3290 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3291 struct iwl_mvm_phy_ctxt *phy_ctxt;
3292 int ret;
3293
3294 lockdep_assert_held(&mvm->mutex);
3295
3296 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3297
3298 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3299 if (!phy_ctxt) {
3300 ret = -ENOSPC;
3301 goto out;
3302 }
3303
3304 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3305 ctx->rx_chains_static,
3306 ctx->rx_chains_dynamic);
3307 if (ret) {
3308 IWL_ERR(mvm, "Failed to add PHY context\n");
3309 goto out;
3310 }
3311
3312 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3313 *phy_ctxt_id = phy_ctxt->id;
3314 out:
3315 return ret;
3316 }
3317
3318 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3319 struct ieee80211_chanctx_conf *ctx)
3320 {
3321 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3322 int ret;
3323
3324 mutex_lock(&mvm->mutex);
3325 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3326 mutex_unlock(&mvm->mutex);
3327
3328 return ret;
3329 }
3330
3331 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3332 struct ieee80211_chanctx_conf *ctx)
3333 {
3334 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3335 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3336
3337 lockdep_assert_held(&mvm->mutex);
3338
3339 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3340 }
3341
3342 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3343 struct ieee80211_chanctx_conf *ctx)
3344 {
3345 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3346
3347 mutex_lock(&mvm->mutex);
3348 __iwl_mvm_remove_chanctx(mvm, ctx);
3349 mutex_unlock(&mvm->mutex);
3350 }
3351
3352 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3353 struct ieee80211_chanctx_conf *ctx,
3354 u32 changed)
3355 {
3356 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3357 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3358 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3359
3360 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3361 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3362 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3363 IEEE80211_CHANCTX_CHANGE_RADAR |
3364 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3365 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3366 phy_ctxt->ref, changed))
3367 return;
3368
3369 mutex_lock(&mvm->mutex);
3370 iwl_mvm_bt_coex_vif_change(mvm);
3371 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3372 ctx->rx_chains_static,
3373 ctx->rx_chains_dynamic);
3374 mutex_unlock(&mvm->mutex);
3375 }
3376
3377 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3378 struct ieee80211_vif *vif,
3379 struct ieee80211_chanctx_conf *ctx,
3380 bool switching_chanctx)
3381 {
3382 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3383 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3384 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3385 int ret;
3386
3387 lockdep_assert_held(&mvm->mutex);
3388
3389 mvmvif->phy_ctxt = phy_ctxt;
3390
3391 switch (vif->type) {
3392 case NL80211_IFTYPE_AP:
3393 /* only needed if we're switching chanctx (i.e. during CSA) */
3394 if (switching_chanctx) {
3395 mvmvif->ap_ibss_active = true;
3396 break;
3397 }
3398 case NL80211_IFTYPE_ADHOC:
3399 /*
3400 * The AP binding flow is handled as part of the start_ap flow
3401 * (in bss_info_changed), similarly for IBSS.
3402 */
3403 ret = 0;
3404 goto out;
3405 case NL80211_IFTYPE_STATION:
3406 break;
3407 case NL80211_IFTYPE_MONITOR:
3408 /* always disable PS when a monitor interface is active */
3409 mvmvif->ps_disabled = true;
3410 break;
3411 default:
3412 ret = -EINVAL;
3413 goto out;
3414 }
3415
3416 ret = iwl_mvm_binding_add_vif(mvm, vif);
3417 if (ret)
3418 goto out;
3419
3420 /*
3421 * Power state must be updated before quotas,
3422 * otherwise fw will complain.
3423 */
3424 iwl_mvm_power_update_mac(mvm);
3425
3426 /* Setting the quota at this stage is only required for monitor
3427 * interfaces. For the other types, the bss_info changed flow
3428 * will handle quota settings.
3429 */
3430 if (vif->type == NL80211_IFTYPE_MONITOR) {
3431 mvmvif->monitor_active = true;
3432 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3433 if (ret)
3434 goto out_remove_binding;
3435
3436 ret = iwl_mvm_add_snif_sta(mvm, vif);
3437 if (ret)
3438 goto out_remove_binding;
3439
3440 }
3441
3442 /* Handle binding during CSA */
3443 if (vif->type == NL80211_IFTYPE_AP) {
3444 iwl_mvm_update_quotas(mvm, false, NULL);
3445 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3446 }
3447
3448 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3449 u32 duration = 2 * vif->bss_conf.beacon_int;
3450
3451 /* iwl_mvm_protect_session() reads directly from the
3452 * device (the system time), so make sure it is
3453 * available.
3454 */
3455 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3456 if (ret)
3457 goto out_remove_binding;
3458
3459 /* Protect the session to make sure we hear the first
3460 * beacon on the new channel.
3461 */
3462 iwl_mvm_protect_session(mvm, vif, duration, duration,
3463 vif->bss_conf.beacon_int / 2,
3464 true);
3465
3466 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3467
3468 iwl_mvm_update_quotas(mvm, false, NULL);
3469 }
3470
3471 goto out;
3472
3473 out_remove_binding:
3474 iwl_mvm_binding_remove_vif(mvm, vif);
3475 iwl_mvm_power_update_mac(mvm);
3476 out:
3477 if (ret)
3478 mvmvif->phy_ctxt = NULL;
3479 return ret;
3480 }
3481 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3482 struct ieee80211_vif *vif,
3483 struct ieee80211_chanctx_conf *ctx)
3484 {
3485 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3486 int ret;
3487
3488 mutex_lock(&mvm->mutex);
3489 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3490 mutex_unlock(&mvm->mutex);
3491
3492 return ret;
3493 }
3494
3495 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3496 struct ieee80211_vif *vif,
3497 struct ieee80211_chanctx_conf *ctx,
3498 bool switching_chanctx)
3499 {
3500 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3501 struct ieee80211_vif *disabled_vif = NULL;
3502
3503 lockdep_assert_held(&mvm->mutex);
3504
3505 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3506
3507 switch (vif->type) {
3508 case NL80211_IFTYPE_ADHOC:
3509 goto out;
3510 case NL80211_IFTYPE_MONITOR:
3511 mvmvif->monitor_active = false;
3512 mvmvif->ps_disabled = false;
3513 iwl_mvm_rm_snif_sta(mvm, vif);
3514 break;
3515 case NL80211_IFTYPE_AP:
3516 /* This part is triggered only during CSA */
3517 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3518 goto out;
3519
3520 mvmvif->csa_countdown = false;
3521
3522 /* Set CS bit on all the stations */
3523 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3524
3525 /* Save blocked iface, the timeout is set on the next beacon */
3526 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3527
3528 mvmvif->ap_ibss_active = false;
3529 break;
3530 case NL80211_IFTYPE_STATION:
3531 if (!switching_chanctx)
3532 break;
3533
3534 disabled_vif = vif;
3535
3536 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3537 break;
3538 default:
3539 break;
3540 }
3541
3542 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3543 iwl_mvm_binding_remove_vif(mvm, vif);
3544
3545 out:
3546 mvmvif->phy_ctxt = NULL;
3547 iwl_mvm_power_update_mac(mvm);
3548 }
3549
3550 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3551 struct ieee80211_vif *vif,
3552 struct ieee80211_chanctx_conf *ctx)
3553 {
3554 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3555
3556 mutex_lock(&mvm->mutex);
3557 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3558 mutex_unlock(&mvm->mutex);
3559 }
3560
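/*
 * Channel context switch in SWAP mode: tear down the old context, add the
 * new one and move the vif over, falling back to the old context (or
 * restarting the NIC) if any step fails.
 */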
3561 static int
3562 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3563 struct ieee80211_vif_chanctx_switch *vifs)
3564 {
3565 int ret;
3566
3567 mutex_lock(&mvm->mutex);
3568 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3569 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3570
3571 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3572 if (ret) {
3573 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3574 goto out_reassign;
3575 }
3576
3577 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3578 true);
3579 if (ret) {
3580 IWL_ERR(mvm,
3581 "failed to assign new_ctx during channel switch\n");
3582 goto out_remove;
3583 }
3584
3585 /* we don't support TDLS during DCM - can be caused by channel switch */
3586 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3587 iwl_mvm_teardown_tdls_peers(mvm);
3588
3589 goto out;
3590
3591 out_remove:
3592 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3593
3594 out_reassign:
3595 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3596 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3597 goto out_restart;
3598 }
3599
3600 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3601 true)) {
3602 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3603 goto out_restart;
3604 }
3605
3606 goto out;
3607
3608 out_restart:
3609 /* things keep failing, better restart the hw */
3610 iwl_mvm_nic_restart(mvm, false);
3611
3612 out:
3613 mutex_unlock(&mvm->mutex);
3614
3615 return ret;
3616 }
3617
3618 static int
3619 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3620 struct ieee80211_vif_chanctx_switch *vifs)
3621 {
3622 int ret;
3623
3624 mutex_lock(&mvm->mutex);
3625 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3626
3627 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3628 true);
3629 if (ret) {
3630 IWL_ERR(mvm,
3631 "failed to assign new_ctx during channel switch\n");
3632 goto out_reassign;
3633 }
3634
3635 goto out;
3636
3637 out_reassign:
3638 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3639 true)) {
3640 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3641 goto out_restart;
3642 }
3643
3644 goto out;
3645
3646 out_restart:
3647 /* things keep failing, better restart the hw */
3648 iwl_mvm_nic_restart(mvm, false);
3649
3650 out:
3651 mutex_unlock(&mvm->mutex);
3652
3653 return ret;
3654 }
3655
3656 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3657 struct ieee80211_vif_chanctx_switch *vifs,
3658 int n_vifs,
3659 enum ieee80211_chanctx_switch_mode mode)
3660 {
3661 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3662 int ret;
3663
3664 /* we only support a single-vif right now */
3665 if (n_vifs > 1)
3666 return -EOPNOTSUPP;
3667
3668 switch (mode) {
3669 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3670 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3671 break;
3672 case CHANCTX_SWMODE_REASSIGN_VIF:
3673 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3674 break;
3675 default:
3676 ret = -EOPNOTSUPP;
3677 break;
3678 }
3679
3680 return ret;
3681 }
3682
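/* a TIM change is applied by updating the beacon of the station's vif */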
3683 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3684 struct ieee80211_sta *sta,
3685 bool set)
3686 {
3687 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3688 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3689
3690 if (!mvm_sta || !mvm_sta->vif) {
3691 IWL_ERR(mvm, "Station is not associated with a vif\n");
3692 return -EINVAL;
3693 }
3694
3695 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3696 }
3697
3698 #ifdef CONFIG_NL80211_TESTMODE
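/*
 * nl80211 testmode support: set a NoA duration on a beaconing P2P GO vif,
 * or enable/disable beacon filtering on an associated client vif.
 */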
3699 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3700 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3701 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3702 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3703 };
3704
3705 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3706 struct ieee80211_vif *vif,
3707 void *data, int len)
3708 {
3709 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3710 int err;
3711 u32 noa_duration;
3712
3713 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3714 if (err)
3715 return err;
3716
3717 if (!tb[IWL_MVM_TM_ATTR_CMD])
3718 return -EINVAL;
3719
3720 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3721 case IWL_MVM_TM_CMD_SET_NOA:
3722 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3723 !vif->bss_conf.enable_beacon ||
3724 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3725 return -EINVAL;
3726
3727 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3728 if (noa_duration >= vif->bss_conf.beacon_int)
3729 return -EINVAL;
3730
3731 mvm->noa_duration = noa_duration;
3732 mvm->noa_vif = vif;
3733
3734 return iwl_mvm_update_quotas(mvm, false, NULL);
3735 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3736 /* must be an associated client vif - ignore authorized */
3737 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3738 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3739 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3740 return -EINVAL;
3741
3742 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3743 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3744 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3745 }
3746
3747 return -EOPNOTSUPP;
3748 }
3749
3750 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3751 struct ieee80211_vif *vif,
3752 void *data, int len)
3753 {
3754 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3755 int err;
3756
3757 mutex_lock(&mvm->mutex);
3758 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3759 mutex_unlock(&mvm->mutex);
3760
3761 return err;
3762 }
3763 #endif
3764
3765 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3766 struct ieee80211_vif *vif,
3767 struct ieee80211_channel_switch *chsw)
3768 {
3769 /* By implementing this operation, we prevent mac80211 from
3770 * starting its own channel switch timer, so that we can call
3771 * ieee80211_chswitch_done() ourselves at the right time
3772 * (which is when the absence time event starts).
3773 */
3774
3775 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3776 "dummy channel switch op\n");
3777 }
3778
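/*
 * Prepare for a channel switch announcement. On an AP/GO vif, record the
 * CSA vif and target frequency (only one CSA may run at a time). On a
 * client vif, schedule an absence time event around the first beacon on
 * the new channel and disable beacon filtering. Power save is disabled
 * and TDLS peers are torn down for the duration of the switch.
 */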
3779 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3780 struct ieee80211_vif *vif,
3781 struct ieee80211_channel_switch *chsw)
3782 {
3783 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3784 struct ieee80211_vif *csa_vif;
3785 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3786 u32 apply_time;
3787 int ret;
3788
3789 mutex_lock(&mvm->mutex);
3790
3791 mvmvif->csa_failed = false;
3792
3793 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3794 chsw->chandef.center_freq1);
3795
3796 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3797
3798 switch (vif->type) {
3799 case NL80211_IFTYPE_AP:
3800 csa_vif =
3801 rcu_dereference_protected(mvm->csa_vif,
3802 lockdep_is_held(&mvm->mutex));
3803 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3804 "Another CSA is already in progress")) {
3805 ret = -EBUSY;
3806 goto out_unlock;
3807 }
3808
3809 /* we haven't unblocked tx yet - prevent a new CS meanwhile */
3810 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
3811 lockdep_is_held(&mvm->mutex))) {
3812 ret = -EBUSY;
3813 goto out_unlock;
3814 }
3815
3816 rcu_assign_pointer(mvm->csa_vif, vif);
3817
3818 if (WARN_ONCE(mvmvif->csa_countdown,
3819 "Previous CSA countdown didn't complete")) {
3820 ret = -EBUSY;
3821 goto out_unlock;
3822 }
3823
3824 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
3825
3826 break;
3827 case NL80211_IFTYPE_STATION:
3828 if (mvmvif->lqm_active)
3829 iwl_mvm_send_lqm_cmd(vif,
3830 LQM_CMD_OPERATION_STOP_MEASUREMENT,
3831 0, 0);
3832
3833 /* Schedule the time event slightly before beacon 1,
3834 * to make sure we're on the new channel when the
3835 * GO/AP arrives.
3836 */
3837 apply_time = chsw->device_timestamp +
3838 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3839 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3840
3841 if (chsw->block_tx)
3842 iwl_mvm_csa_client_absent(mvm, vif);
3843
3844 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3845 apply_time);
3846 if (mvmvif->bf_data.bf_enabled) {
3847 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3848 if (ret)
3849 goto out_unlock;
3850 }
3851
3852 break;
3853 default:
3854 break;
3855 }
3856
3857 mvmvif->ps_disabled = true;
3858
3859 ret = iwl_mvm_power_update_ps(mvm);
3860 if (ret)
3861 goto out_unlock;
3862
3863 /* we won't be on this channel any longer */
3864 iwl_mvm_teardown_tdls_peers(mvm);
3865
3866 out_unlock:
3867 mutex_unlock(&mvm->mutex);
3868
3869 return ret;
3870 }
3871
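/*
 * Complete a channel switch. On a client vif, re-enable TX to the AP
 * station, update the MAC context, re-enable beacon filtering and cancel
 * the session protection; power save is re-enabled in all cases.
 */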
3872 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3873 struct ieee80211_vif *vif)
3874 {
3875 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3876 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3877 int ret;
3878
3879 mutex_lock(&mvm->mutex);
3880
3881 if (mvmvif->csa_failed) {
3882 mvmvif->csa_failed = false;
3883 ret = -EIO;
3884 goto out_unlock;
3885 }
3886
3887 if (vif->type == NL80211_IFTYPE_STATION) {
3888 struct iwl_mvm_sta *mvmsta;
3889
3890 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3891 mvmvif->ap_sta_id);
3892
3893 if (WARN_ON(!mvmsta)) {
3894 ret = -EIO;
3895 goto out_unlock;
3896 }
3897
3898 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3899
3900 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3901
3902 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3903 if (ret)
3904 goto out_unlock;
3905
3906 iwl_mvm_stop_session_protection(mvm, vif);
3907 }
3908
3909 mvmvif->ps_disabled = false;
3910
3911 ret = iwl_mvm_power_update_ps(mvm);
3912
3913 out_unlock:
3914 mutex_unlock(&mvm->mutex);
3915
3916 return ret;
3917 }
3918
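/*
 * Flush TX for a station vif: collect the TFD queues of the AP station
 * and any TDLS peers on this vif, then either flush them (drop) or wait
 * for them to drain.
 */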
3919 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3920 struct ieee80211_vif *vif, u32 queues, bool drop)
3921 {
3922 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3923 struct iwl_mvm_vif *mvmvif;
3924 struct iwl_mvm_sta *mvmsta;
3925 struct ieee80211_sta *sta;
3926 int i;
3927 u32 msk = 0;
3928
3929 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3930 return;
3931
3932 /* Make sure we're done with the deferred traffic before flushing */
3933 if (iwl_mvm_is_dqa_supported(mvm))
3934 flush_work(&mvm->add_stream_wk);
3935
3936 mutex_lock(&mvm->mutex);
3937 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3938
3939 /* flush the AP-station and all TDLS peers */
3940 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3941 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3942 lockdep_is_held(&mvm->mutex));
3943 if (IS_ERR_OR_NULL(sta))
3944 continue;
3945
3946 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3947 if (mvmsta->vif != vif)
3948 continue;
3949
3950 /* make sure only TDLS peers or the AP are flushed */
3951 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3952
3953 msk |= mvmsta->tfd_queue_msk;
3954 }
3955
3956 if (drop) {
3957 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3958 IWL_ERR(mvm, "flush request fail\n");
3959 mutex_unlock(&mvm->mutex);
3960 } else {
3961 mutex_unlock(&mvm->mutex);
3962
3963 /* this can take a while, and other operations may need to succeed
3964 * while we wait, so do it without holding the mutex
3965 */
3966 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3967 }
3968 }
3969
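/*
 * Report a single global survey entry with the accumulated radio on-air,
 * RX, TX and scan times (converted from usec to msec); requires the
 * radio/beacon statistics firmware capability.
 */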
3970 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3971 struct survey_info *survey)
3972 {
3973 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3974 int ret;
3975
3976 memset(survey, 0, sizeof(*survey));
3977
3978 /* only support global statistics right now */
3979 if (idx != 0)
3980 return -ENOENT;
3981
3982 if (!fw_has_capa(&mvm->fw->ucode_capa,
3983 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3984 return -ENOENT;
3985
3986 mutex_lock(&mvm->mutex);
3987
3988 if (mvm->ucode_loaded) {
3989 ret = iwl_mvm_request_statistics(mvm, false);
3990 if (ret)
3991 goto out;
3992 }
3993
3994 survey->filled = SURVEY_INFO_TIME |
3995 SURVEY_INFO_TIME_RX |
3996 SURVEY_INFO_TIME_TX |
3997 SURVEY_INFO_TIME_SCAN;
3998 survey->time = mvm->accu_radio_stats.on_time_rf +
3999 mvm->radio_stats.on_time_rf;
4000 do_div(survey->time, USEC_PER_MSEC);
4001
4002 survey->time_rx = mvm->accu_radio_stats.rx_time +
4003 mvm->radio_stats.rx_time;
4004 do_div(survey->time_rx, USEC_PER_MSEC);
4005
4006 survey->time_tx = mvm->accu_radio_stats.tx_time +
4007 mvm->radio_stats.tx_time;
4008 do_div(survey->time_tx, USEC_PER_MSEC);
4009
4010 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4011 mvm->radio_stats.on_time_scan;
4012 do_div(survey->time_scan, USEC_PER_MSEC);
4013
4014 ret = 0;
4015 out:
4016 mutex_unlock(&mvm->mutex);
4017 return ret;
4018 }
4019
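/*
 * Fill per-station statistics: the average signal, and - when beacon
 * filtering is active and the firmware reports beacon statistics - the
 * beacon RX count and average beacon signal for the AP station.
 */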
4020 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4021 struct ieee80211_vif *vif,
4022 struct ieee80211_sta *sta,
4023 struct station_info *sinfo)
4024 {
4025 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4026 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4027 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4028
4029 if (mvmsta->avg_energy) {
4030 sinfo->signal_avg = mvmsta->avg_energy;
4031 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
4032 }
4033
4034 if (!fw_has_capa(&mvm->fw->ucode_capa,
4035 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4036 return;
4037
4038 /* if beacon filtering isn't on, mac80211 does it anyway */
4039 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4040 return;
4041
4042 if (!vif->bss_conf.assoc)
4043 return;
4044
4045 mutex_lock(&mvm->mutex);
4046
4047 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4048 goto unlock;
4049
4050 if (iwl_mvm_request_statistics(mvm, false))
4051 goto unlock;
4052
4053 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4054 mvmvif->beacon_stats.accu_num_beacons;
4055 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4056 if (mvmvif->beacon_stats.avg_signal) {
4057 /* firmware only reports a value after RXing a few beacons */
4058 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4059 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4060 }
4061 unlock:
4062 mutex_unlock(&mvm->mutex);
4063 }
4064
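/*
 * Collect firmware debug data for MLME events. CHECK_MLME_TRIGGER
 * decrements the per-event occurrence counter and only collects once it
 * reaches zero (a counter of zero collects on every occurrence).
 */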
4065 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4066 struct ieee80211_vif *vif,
4067 const struct ieee80211_event *event)
4068 {
4069 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
4070 do { \
4071 if ((_cnt) && --(_cnt)) \
4072 break; \
4073 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4074 } while (0)
4075
4076 struct iwl_fw_dbg_trigger_tlv *trig;
4077 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4078
4079 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4080 return;
4081
4082 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4083 trig_mlme = (void *)trig->data;
4084 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4085 return;
4086
4087 if (event->u.mlme.data == ASSOC_EVENT) {
4088 if (event->u.mlme.status == MLME_DENIED)
4089 CHECK_MLME_TRIGGER(mvm, trig, buf,
4090 trig_mlme->stop_assoc_denied,
4091 "DENIED ASSOC: reason %d",
4092 event->u.mlme.reason);
4093 else if (event->u.mlme.status == MLME_TIMEOUT)
4094 CHECK_MLME_TRIGGER(mvm, trig, buf,
4095 trig_mlme->stop_assoc_timeout,
4096 "ASSOC TIMEOUT");
4097 } else if (event->u.mlme.data == AUTH_EVENT) {
4098 if (event->u.mlme.status == MLME_DENIED)
4099 CHECK_MLME_TRIGGER(mvm, trig, buf,
4100 trig_mlme->stop_auth_denied,
4101 "DENIED AUTH: reason %d",
4102 event->u.mlme.reason);
4103 else if (event->u.mlme.status == MLME_TIMEOUT)
4104 CHECK_MLME_TRIGGER(mvm, trig, buf,
4105 trig_mlme->stop_auth_timeout,
4106 "AUTH TIMEOUT");
4107 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4108 CHECK_MLME_TRIGGER(mvm, trig, buf,
4109 trig_mlme->stop_rx_deauth,
4110 "DEAUTH RX %d", event->u.mlme.reason);
4111 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4112 CHECK_MLME_TRIGGER(mvm, trig, buf,
4113 trig_mlme->stop_tx_deauth,
4114 "DEAUTH TX %d", event->u.mlme.reason);
4115 }
4116 #undef CHECK_MLME_TRIGGER
4117 }
4118
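/*
 * Collect firmware debug data when a BAR is received on a TID enabled in
 * the BA debug trigger.
 */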
4119 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4120 struct ieee80211_vif *vif,
4121 const struct ieee80211_event *event)
4122 {
4123 struct iwl_fw_dbg_trigger_tlv *trig;
4124 struct iwl_fw_dbg_trigger_ba *ba_trig;
4125
4126 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4127 return;
4128
4129 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4130 ba_trig = (void *)trig->data;
4131 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4132 return;
4133
4134 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4135 return;
4136
4137 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4138 "BAR received from %pM, tid %d, ssn %d",
4139 event->u.ba.sta->addr, event->u.ba.tid,
4140 event->u.ba.ssn);
4141 }
4142
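/*
 * Collect firmware debug data when a frame from a station times out on a
 * TID enabled in the BA debug trigger.
 */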
4143 static void
4144 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4145 struct ieee80211_vif *vif,
4146 const struct ieee80211_event *event)
4147 {
4148 struct iwl_fw_dbg_trigger_tlv *trig;
4149 struct iwl_fw_dbg_trigger_ba *ba_trig;
4150
4151 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4152 return;
4153
4154 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4155 ba_trig = (void *)trig->data;
4156 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4157 return;
4158
4159 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4160 return;
4161
4162 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4163 "Frame from %pM timed out, tid %d",
4164 event->u.ba.sta->addr, event->u.ba.tid);
4165 }
4166
4167 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4168 struct ieee80211_vif *vif,
4169 const struct ieee80211_event *event)
4170 {
4171 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4172
4173 switch (event->type) {
4174 case MLME_EVENT:
4175 iwl_mvm_event_mlme_callback(mvm, vif, event);
4176 break;
4177 case BAR_RX_EVENT:
4178 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4179 break;
4180 case BA_FRAME_TIMEOUT:
4181 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4182 break;
4183 default:
4184 break;
4185 }
4186 }
4187
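/*
 * Notify all RX queues and, when sync is requested, wait up to one second
 * for every queue to process the notification, so that all in-flight RX
 * is guaranteed to be done.
 */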
4188 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4189 struct iwl_mvm_internal_rxq_notif *notif,
4190 u32 size)
4191 {
4192 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4193 int ret;
4194
4195 lockdep_assert_held(&mvm->mutex);
4196
4197 if (!iwl_mvm_has_new_rx_api(mvm))
4198 return;
4199
4200 notif->cookie = mvm->queue_sync_cookie;
4201
4202 if (notif->sync)
4203 atomic_set(&mvm->queue_sync_counter,
4204 mvm->trans->num_rx_queues);
4205
4206 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4207 if (ret) {
4208 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4209 goto out;
4210 }
4211
4212 if (notif->sync)
4213 ret = wait_event_timeout(mvm->rx_sync_waitq,
4214 atomic_read(&mvm->queue_sync_counter) == 0,
4215 HZ);
4216 WARN_ON_ONCE(!ret);
4217
4218 out:
4219 atomic_set(&mvm->queue_sync_counter, 0);
4220 mvm->queue_sync_cookie++;
4221 }
4222
4223 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4224 {
4225 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4226 struct iwl_mvm_internal_rxq_notif data = {
4227 .type = IWL_MVM_RXQ_EMPTY,
4228 .sync = 1,
4229 };
4230
4231 mutex_lock(&mvm->mutex);
4232 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4233 mutex_unlock(&mvm->mutex);
4234 }
4235
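/* mac80211 callbacks implemented by the MVM op mode */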
4236 const struct ieee80211_ops iwl_mvm_hw_ops = {
4237 .tx = iwl_mvm_mac_tx,
4238 .ampdu_action = iwl_mvm_mac_ampdu_action,
4239 .start = iwl_mvm_mac_start,
4240 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4241 .stop = iwl_mvm_mac_stop,
4242 .add_interface = iwl_mvm_mac_add_interface,
4243 .remove_interface = iwl_mvm_mac_remove_interface,
4244 .config = iwl_mvm_mac_config,
4245 .prepare_multicast = iwl_mvm_prepare_multicast,
4246 .configure_filter = iwl_mvm_configure_filter,
4247 .config_iface_filter = iwl_mvm_config_iface_filter,
4248 .bss_info_changed = iwl_mvm_bss_info_changed,
4249 .hw_scan = iwl_mvm_mac_hw_scan,
4250 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4251 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4252 .sta_state = iwl_mvm_mac_sta_state,
4253 .sta_notify = iwl_mvm_mac_sta_notify,
4254 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4255 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4256 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4257 .sta_rc_update = iwl_mvm_sta_rc_update,
4258 .conf_tx = iwl_mvm_mac_conf_tx,
4259 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4260 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4261 .flush = iwl_mvm_mac_flush,
4262 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4263 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4264 .set_key = iwl_mvm_mac_set_key,
4265 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4266 .remain_on_channel = iwl_mvm_roc,
4267 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4268 .add_chanctx = iwl_mvm_add_chanctx,
4269 .remove_chanctx = iwl_mvm_remove_chanctx,
4270 .change_chanctx = iwl_mvm_change_chanctx,
4271 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4272 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4273 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4274
4275 .start_ap = iwl_mvm_start_ap_ibss,
4276 .stop_ap = iwl_mvm_stop_ap_ibss,
4277 .join_ibss = iwl_mvm_start_ap_ibss,
4278 .leave_ibss = iwl_mvm_stop_ap_ibss,
4279
4280 .set_tim = iwl_mvm_set_tim,
4281
4282 .channel_switch = iwl_mvm_channel_switch,
4283 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4284 .post_channel_switch = iwl_mvm_post_channel_switch,
4285
4286 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4287 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4288 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4289
4290 .event_callback = iwl_mvm_mac_event_callback,
4291
4292 .sync_rx_queues = iwl_mvm_sync_rx_queues,
4293
4294 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4295
4296 #ifdef CONFIG_PM_SLEEP
4297 /* suspend/resume handling is implemented in d3.c */
4298 .suspend = iwl_mvm_suspend,
4299 .resume = iwl_mvm_resume,
4300 .set_wakeup = iwl_mvm_set_wakeup,
4301 .set_rekey_data = iwl_mvm_set_rekey_data,
4302 #if IS_ENABLED(CONFIG_IPV6)
4303 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4304 #endif
4305 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4306 #endif
4307 .get_survey = iwl_mvm_mac_get_survey,
4308 .sta_statistics = iwl_mvm_mac_sta_statistics,
4309 };