mirror_ubuntu-zesty-kernel.git: drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66 #include <linux/kernel.h>
67 #include <linux/slab.h>
68 #include <linux/skbuff.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/ip.h>
72 #include <linux/if_arp.h>
73 #include <linux/time.h>
74 #include <net/mac80211.h>
75 #include <net/ieee80211_radiotap.h>
76 #include <net/tcp.h>
77
78 #include "iwl-op-mode.h"
79 #include "iwl-io.h"
80 #include "mvm.h"
81 #include "sta.h"
82 #include "time-event.h"
83 #include "iwl-eeprom-parse.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-nvm-parse.h"
89 #include "fw-dbg.h"
90
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 {
93 .max = 1,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_AP) |
99 BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 BIT(NL80211_IFTYPE_P2P_GO),
101 },
102 {
103 .max = 1,
104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 },
106 };
107
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 {
110 .num_different_channels = 2,
111 .max_interfaces = 3,
112 .limits = iwl_mvm_limits,
113 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 },
115 };
116
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 .min_len = 0,
121 .max_len = 255,
122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 .tok = &iwl_mvm_wowlan_tcp_token_feature,
127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 sizeof(struct ethhdr) -
129 sizeof(struct iphdr) -
130 sizeof(struct tcphdr),
131 .data_interval_max = 65535, /* __le16 in API */
132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 sizeof(struct ethhdr) -
134 sizeof(struct iphdr) -
135 sizeof(struct tcphdr),
136 .seq = true,
137 };
138 #endif
139
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142 * Use the reserved field to indicate magic values.
143  * These values are only used internally by the driver,
144  * and won't make it to the fw (reserved will be 0).
145  * BC_FILTER_MAGIC_IP - configure the val of this attribute to
146  *	be the vif's IP address. If the vif doesn't have exactly one
147  *	IP address (none, or more than one), this attribute will
148  *	be skipped.
149  * BC_FILTER_MAGIC_MAC - set the val of this attribute to
150  *	the LSB bytes of the vif's MAC address.
151 */
152 enum {
153 BC_FILTER_MAGIC_NONE = 0,
154 BC_FILTER_MAGIC_IP,
155 BC_FILTER_MAGIC_MAC,
156 };
157
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 {
160 /* arp */
161 .discard = 0,
162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 .attrs = {
164 {
165 /* frame type - arp, hw type - ethernet */
166 .offset_type =
167 BCAST_FILTER_OFFSET_PAYLOAD_START,
168 .offset = sizeof(rfc1042_header),
169 .val = cpu_to_be32(0x08060001),
170 .mask = cpu_to_be32(0xffffffff),
171 },
172 {
173 /* arp dest ip */
174 .offset_type =
175 BCAST_FILTER_OFFSET_PAYLOAD_START,
176 .offset = sizeof(rfc1042_header) + 2 +
177 sizeof(struct arphdr) +
178 ETH_ALEN + sizeof(__be32) +
179 ETH_ALEN,
180 .mask = cpu_to_be32(0xffffffff),
181 /* mark it as special field */
182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 },
184 },
185 },
186 {
187 /* dhcp offer bcast */
188 .discard = 0,
189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 .attrs = {
191 {
192 			/* udp dest port - 68 (bootp client) */
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = offsetof(struct udphdr, dest),
195 .val = cpu_to_be32(0x00440000),
196 .mask = cpu_to_be32(0xffff0000),
197 },
198 {
199 /* dhcp - lsb bytes of client hw address */
200 .offset_type = BCAST_FILTER_OFFSET_IP_END,
201 .offset = 38,
202 .mask = cpu_to_be32(0xffffffff),
203 /* mark it as special field */
204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 },
206 },
207 },
208 /* last filter must be empty */
209 {},
210 };
211 #endif
212
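/*
 * D0i3 (runtime power save) reference counting: iwl_mvm_ref()/iwl_mvm_unref()
 * below keep a per-type count of reasons the device must stay out of D0i3,
 * and mirror each reference into the transport layer via
 * iwl_trans_ref()/iwl_trans_unref(). When D0i3 isn't supported they are no-ops.
 */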
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 if (!iwl_mvm_is_d0i3_supported(mvm))
216 return;
217
218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 spin_lock_bh(&mvm->refs_lock);
220 mvm->refs[ref_type]++;
221 spin_unlock_bh(&mvm->refs_lock);
222 iwl_trans_ref(mvm->trans);
223 }
224
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 if (!iwl_mvm_is_d0i3_supported(mvm))
228 return;
229
230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 spin_lock_bh(&mvm->refs_lock);
232 if (WARN_ON(!mvm->refs[ref_type])) {
233 spin_unlock_bh(&mvm->refs_lock);
234 return;
235 }
236 mvm->refs[ref_type]--;
237 spin_unlock_bh(&mvm->refs_lock);
238 iwl_trans_unref(mvm->trans);
239 }
240
241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
242 enum iwl_mvm_ref_type except_ref)
243 {
244 int i, j;
245
246 if (!iwl_mvm_is_d0i3_supported(mvm))
247 return;
248
249 spin_lock_bh(&mvm->refs_lock);
250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
251 if (except_ref == i || !mvm->refs[i])
252 continue;
253
254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
255 i, mvm->refs[i]);
256 for (j = 0; j < mvm->refs[i]; j++)
257 iwl_trans_unref(mvm->trans);
258 mvm->refs[i] = 0;
259 }
260 spin_unlock_bh(&mvm->refs_lock);
261 }
262
263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
264 {
265 int i;
266 bool taken = false;
267
268 if (!iwl_mvm_is_d0i3_supported(mvm))
269 return true;
270
271 spin_lock_bh(&mvm->refs_lock);
272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
273 if (mvm->refs[i]) {
274 taken = true;
275 break;
276 }
277 }
278 spin_unlock_bh(&mvm->refs_lock);
279
280 return taken;
281 }
282
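/*
 * Take a reference and synchronously wait (up to one second) for any pending
 * D0i3 exit to complete. On timeout the reference is dropped again and -EIO
 * is returned, so callers can bail out safely.
 */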
283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
284 {
285 iwl_mvm_ref(mvm, ref_type);
286
287 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
288 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
289 HZ)) {
290 WARN_ON_ONCE(1);
291 iwl_mvm_unref(mvm, ref_type);
292 return -EIO;
293 }
294
295 return 0;
296 }
297
298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
299 {
300 int i;
301
302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
303 for (i = 0; i < NUM_PHY_CTX; i++) {
304 mvm->phy_ctxts[i].id = i;
305 mvm->phy_ctxts[i].ref = 0;
306 }
307 }
308
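/*
 * Ask the firmware for an MCC (mobile country code) update for the given
 * alpha2 and parse the reply into a regulatory domain. Must be called with
 * mvm->mutex held; the returned regdomain (if any) is owned by the caller
 * and must be freed with kfree().
 */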
309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
310 const char *alpha2,
311 enum iwl_mcc_source src_id,
312 bool *changed)
313 {
314 struct ieee80211_regdomain *regd = NULL;
315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
317 struct iwl_mcc_update_resp *resp;
318
319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
320
321 lockdep_assert_held(&mvm->mutex);
322
323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
324 if (IS_ERR_OR_NULL(resp)) {
325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
326 PTR_ERR_OR_ZERO(resp));
327 goto out;
328 }
329
330 if (changed)
331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
332
333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
334 __le32_to_cpu(resp->n_channels),
335 resp->channels,
336 __le16_to_cpu(resp->mcc));
337 /* Store the return source id */
338 src_id = resp->source_id;
339 kfree(resp);
340 if (IS_ERR_OR_NULL(regd)) {
341 		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
342 PTR_ERR_OR_ZERO(regd));
343 goto out;
344 }
345
346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
348 mvm->lar_regdom_set = true;
349 mvm->mcc_src = src_id;
350
351 out:
352 return regd;
353 }
354
355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
356 {
357 bool changed;
358 struct ieee80211_regdomain *regd;
359
360 if (!iwl_mvm_is_lar_supported(mvm))
361 return;
362
363 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
364 if (!IS_ERR_OR_NULL(regd)) {
365 /* only update the regulatory core if changed */
366 if (changed)
367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
368
369 kfree(regd);
370 }
371 }
372
373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
374 bool *changed)
375 {
376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
377 iwl_mvm_is_wifi_mcc_supported(mvm) ?
378 MCC_SOURCE_GET_CURRENT :
379 MCC_SOURCE_OLD_FW, changed);
380 }
381
382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
383 {
384 enum iwl_mcc_source used_src;
385 struct ieee80211_regdomain *regd;
386 int ret;
387 bool changed;
388 const struct ieee80211_regdomain *r =
389 rtnl_dereference(mvm->hw->wiphy->regd);
390
391 if (!r)
392 return -ENOENT;
393
394 /* save the last source in case we overwrite it below */
395 used_src = mvm->mcc_src;
396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
397 /* Notify the firmware we support wifi location updates */
398 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
399 if (!IS_ERR_OR_NULL(regd))
400 kfree(regd);
401 }
402
403 /* Now set our last stored MCC and source */
404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
405 &changed);
406 if (IS_ERR_OR_NULL(regd))
407 return -EIO;
408
409 /* update cfg80211 if the regdomain was changed */
410 if (changed)
411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
412 else
413 ret = 0;
414
415 kfree(regd);
416 return ret;
417 }
418
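/*
 * Advertise the driver/firmware capabilities to mac80211 (hw flags, cipher
 * suites, interface combinations, scan limits, WoWLAN, etc.), based on the
 * NVM data and the firmware's TLV capabilities, and finally register the hw.
 */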
419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
420 {
421 struct ieee80211_hw *hw = mvm->hw;
422 int num_mac, ret, i;
423 static const u32 mvm_ciphers[] = {
424 WLAN_CIPHER_SUITE_WEP40,
425 WLAN_CIPHER_SUITE_WEP104,
426 WLAN_CIPHER_SUITE_TKIP,
427 WLAN_CIPHER_SUITE_CCMP,
428 };
429
430 /* Tell mac80211 our characteristics */
431 ieee80211_hw_set(hw, SIGNAL_DBM);
432 ieee80211_hw_set(hw, SPECTRUM_MGMT);
433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
434 ieee80211_hw_set(hw, QUEUE_CONTROL);
435 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
436 ieee80211_hw_set(hw, SUPPORTS_PS);
437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
438 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
440 ieee80211_hw_set(hw, CONNECTION_MONITOR);
441 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
446 if (iwl_mvm_has_new_rx_api(mvm))
447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
448
449 if (mvm->trans->num_rx_queues > 1)
450 ieee80211_hw_set(hw, USES_RSS);
451
452 if (mvm->trans->max_skb_frags)
453 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
454
455 if (!iwl_mvm_is_dqa_supported(mvm))
456 hw->queues = mvm->first_agg_queue;
457 else
458 hw->queues = IEEE80211_MAX_QUEUES;
459 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
460 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
461 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
462 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
463 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
464 hw->rate_control_algorithm = "iwl-mvm-rs";
465 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
466 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
467
468 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
469 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
470 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
471 hw->wiphy->cipher_suites = mvm->ciphers;
472
473 if (iwl_mvm_has_new_rx_api(mvm)) {
474 mvm->ciphers[hw->wiphy->n_cipher_suites] =
475 WLAN_CIPHER_SUITE_GCMP;
476 hw->wiphy->n_cipher_suites++;
477 mvm->ciphers[hw->wiphy->n_cipher_suites] =
478 WLAN_CIPHER_SUITE_GCMP_256;
479 hw->wiphy->n_cipher_suites++;
480 }
481
482 /*
483 * Enable 11w if advertised by firmware and software crypto
484 * is not enabled (as the firmware will interpret some mgmt
485 * packets, so enabling it with software crypto isn't safe)
486 */
487 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
488 !iwlwifi_mod_params.sw_crypto) {
489 ieee80211_hw_set(hw, MFP_CAPABLE);
490 mvm->ciphers[hw->wiphy->n_cipher_suites] =
491 WLAN_CIPHER_SUITE_AES_CMAC;
492 hw->wiphy->n_cipher_suites++;
493 }
494
495 /* currently FW API supports only one optional cipher scheme */
496 if (mvm->fw->cs[0].cipher) {
497 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
498 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
499
500 mvm->hw->n_cipher_schemes = 1;
501
502 cs->cipher = le32_to_cpu(fwcs->cipher);
503 cs->iftype = BIT(NL80211_IFTYPE_STATION);
504 cs->hdr_len = fwcs->hdr_len;
505 cs->pn_len = fwcs->pn_len;
506 cs->pn_off = fwcs->pn_off;
507 cs->key_idx_off = fwcs->key_idx_off;
508 cs->key_idx_mask = fwcs->key_idx_mask;
509 cs->key_idx_shift = fwcs->key_idx_shift;
510 cs->mic_len = fwcs->mic_len;
511
512 mvm->hw->cipher_schemes = mvm->cs;
513 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
514 hw->wiphy->n_cipher_suites++;
515 }
516
517 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
518 hw->wiphy->features |=
519 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
520 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
521 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
522
523 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
524 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
525 hw->chanctx_data_size = sizeof(u16);
526
527 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
528 BIT(NL80211_IFTYPE_P2P_CLIENT) |
529 BIT(NL80211_IFTYPE_AP) |
530 BIT(NL80211_IFTYPE_P2P_GO) |
531 BIT(NL80211_IFTYPE_P2P_DEVICE) |
532 BIT(NL80211_IFTYPE_ADHOC);
533
534 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
535 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
536 if (iwl_mvm_is_lar_supported(mvm))
537 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
538 else
539 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
540 REGULATORY_DISABLE_BEACON_HINTS;
541
542 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
543 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
544
545 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
546
547 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
548 hw->wiphy->n_iface_combinations =
549 ARRAY_SIZE(iwl_mvm_iface_combinations);
550
551 hw->wiphy->max_remain_on_channel_duration = 10000;
552 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
553 /* we can compensate an offset of up to 3 channels = 15 MHz */
554 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
555
556 /* Extract MAC address */
557 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
558 hw->wiphy->addresses = mvm->addresses;
559 hw->wiphy->n_addresses = 1;
560
561 /* Extract additional MAC addresses if available */
562 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
563 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
564
565 for (i = 1; i < num_mac; i++) {
566 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
567 ETH_ALEN);
568 mvm->addresses[i].addr[5]++;
569 hw->wiphy->n_addresses++;
570 }
571
572 iwl_mvm_reset_phy_ctxts(mvm);
573
574 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
575
576 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
577
578 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
579 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
580 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
581
582 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
583 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
584 else
585 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
586
587 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
588 hw->wiphy->bands[NL80211_BAND_2GHZ] =
589 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
590 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
591 hw->wiphy->bands[NL80211_BAND_5GHZ] =
592 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
593
594 if (fw_has_capa(&mvm->fw->ucode_capa,
595 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
596 fw_has_api(&mvm->fw->ucode_capa,
597 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
598 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
599 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
600 }
601
602 hw->wiphy->hw_version = mvm->trans->hw_id;
603
604 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
605 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
606 else
607 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
608
609 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
610 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
611 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
612 	/* we create the 802.11 header and a zero-length SSID IE. */
613 hw->wiphy->max_sched_scan_ie_len =
614 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
615 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
616 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
617
618 /*
619 	 * The firmware uses a u8 for the number of iterations, but 0xff is
620 	 * reserved to mean an infinite loop, so the maximum is actually 254.
621 */
622 hw->wiphy->max_sched_scan_plan_iterations = 254;
623
624 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
625 NL80211_FEATURE_LOW_PRIORITY_SCAN |
626 NL80211_FEATURE_P2P_GO_OPPPS |
627 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
628 NL80211_FEATURE_DYNAMIC_SMPS |
629 NL80211_FEATURE_STATIC_SMPS |
630 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
631
632 if (fw_has_capa(&mvm->fw->ucode_capa,
633 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
634 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
635 if (fw_has_capa(&mvm->fw->ucode_capa,
636 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
637 hw->wiphy->features |= NL80211_FEATURE_QUIET;
638
639 if (fw_has_capa(&mvm->fw->ucode_capa,
640 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
641 hw->wiphy->features |=
642 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
643
644 if (fw_has_capa(&mvm->fw->ucode_capa,
645 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
646 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
647
648 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
649
650 #ifdef CONFIG_PM_SLEEP
651 if (iwl_mvm_is_d0i3_supported(mvm) &&
652 device_can_wakeup(mvm->trans->dev)) {
653 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
654 hw->wiphy->wowlan = &mvm->wowlan;
655 }
656
657 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
658 mvm->trans->ops->d3_suspend &&
659 mvm->trans->ops->d3_resume &&
660 device_can_wakeup(mvm->trans->dev)) {
661 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
662 WIPHY_WOWLAN_DISCONNECT |
663 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
664 WIPHY_WOWLAN_RFKILL_RELEASE |
665 WIPHY_WOWLAN_NET_DETECT;
666 if (!iwlwifi_mod_params.sw_crypto)
667 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
668 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
669 WIPHY_WOWLAN_4WAY_HANDSHAKE;
670
671 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
672 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
673 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
674 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
675 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
676 hw->wiphy->wowlan = &mvm->wowlan;
677 }
678 #endif
679
680 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
681 /* assign default bcast filtering configuration */
682 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
683 #endif
684
685 ret = iwl_mvm_leds_init(mvm);
686 if (ret)
687 return ret;
688
689 if (fw_has_capa(&mvm->fw->ucode_capa,
690 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
691 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
692 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
693 ieee80211_hw_set(hw, TDLS_WIDER_BW);
694 }
695
696 if (fw_has_capa(&mvm->fw->ucode_capa,
697 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
698 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
699 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
700 }
701
702 hw->netdev_features |= mvm->cfg->features;
703 if (!iwl_mvm_is_csum_supported(mvm)) {
704 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
705 NETIF_F_RXCSUM);
706 /* We may support SW TX CSUM */
707 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
708 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
709 }
710
711 ret = ieee80211_register_hw(mvm->hw);
712 if (ret)
713 iwl_mvm_leds_exit(mvm);
714
715 return ret;
716 }
717
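/*
 * While the firmware is in D0i3, frames for the D0i3 AP station
 * (mvm->d0i3_ap_sta_id) are queued on mvm->d0i3_tx instead of being
 * transmitted; the queues are stopped and a momentary ref/unref pair
 * triggers the D0i3 exit, after which the deferred frames are dequeued.
 * Returns true if the frame was deferred.
 */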
718 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
719 struct ieee80211_sta *sta,
720 struct sk_buff *skb)
721 {
722 struct iwl_mvm_sta *mvmsta;
723 bool defer = false;
724
725 /*
726 	 * Check the IN_D0I3 flag both before and after taking the
727 	 * spinlock; the unlocked check here avoids taking the
728 	 * spinlock when it isn't needed at all.
729 */
730 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
731 return false;
732
733 spin_lock(&mvm->d0i3_tx_lock);
734 /*
735 * testing the flag again ensures the skb dequeue
736 * loop (on d0i3 exit) hasn't run yet.
737 */
738 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
739 goto out;
740
741 mvmsta = iwl_mvm_sta_from_mac80211(sta);
742 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
743 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
744 goto out;
745
746 __skb_queue_tail(&mvm->d0i3_tx, skb);
747 ieee80211_stop_queues(mvm->hw);
748
749 /* trigger wakeup */
750 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
751 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
752
753 defer = true;
754 out:
755 spin_unlock(&mvm->d0i3_tx_lock);
756 return defer;
757 }
758
759 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
760 struct ieee80211_tx_control *control,
761 struct sk_buff *skb)
762 {
763 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
764 struct ieee80211_sta *sta = control->sta;
765 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
766 struct ieee80211_hdr *hdr = (void *)skb->data;
767
768 if (iwl_mvm_is_radio_killed(mvm)) {
769 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
770 goto drop;
771 }
772
773 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
774 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
775 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
776 goto drop;
777
778 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
779 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
780 ieee80211_is_mgmt(hdr->frame_control) &&
781 !ieee80211_is_deauth(hdr->frame_control) &&
782 !ieee80211_is_disassoc(hdr->frame_control) &&
783 !ieee80211_is_action(hdr->frame_control)))
784 sta = NULL;
785
786 if (sta) {
787 if (iwl_mvm_defer_tx(mvm, sta, skb))
788 return;
789 if (iwl_mvm_tx_skb(mvm, skb, sta))
790 goto drop;
791 return;
792 }
793
794 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
795 goto drop;
796 return;
797 drop:
798 ieee80211_free_txskb(hw, skb);
799 }
800
801 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
802 {
803 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
804 return false;
805 return true;
806 }
807
808 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
809 {
810 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
811 return false;
812 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
813 return true;
814
815 /* enabled by default */
816 return true;
817 }
818
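/*
 * Collect the block-ack firmware debug trigger only if the trigger's TID
 * bitmap includes the TID of the current aggregation event.
 */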
819 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
820 do { \
821 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
822 break; \
823 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
824 } while (0)
825
826 static void
827 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
828 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
829 enum ieee80211_ampdu_mlme_action action)
830 {
831 struct iwl_fw_dbg_trigger_tlv *trig;
832 struct iwl_fw_dbg_trigger_ba *ba_trig;
833
834 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
835 return;
836
837 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
838 ba_trig = (void *)trig->data;
839
840 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
841 return;
842
843 switch (action) {
844 case IEEE80211_AMPDU_TX_OPERATIONAL: {
845 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
846 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
847
848 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
849 "TX AGG START: MAC %pM tid %d ssn %d\n",
850 sta->addr, tid, tid_data->ssn);
851 break;
852 }
853 case IEEE80211_AMPDU_TX_STOP_CONT:
854 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
855 "TX AGG STOP: MAC %pM tid %d\n",
856 sta->addr, tid);
857 break;
858 case IEEE80211_AMPDU_RX_START:
859 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
860 "RX AGG START: MAC %pM tid %d ssn %d\n",
861 sta->addr, tid, rx_ba_ssn);
862 break;
863 case IEEE80211_AMPDU_RX_STOP:
864 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
865 "RX AGG STOP: MAC %pM tid %d\n",
866 sta->addr, tid);
867 break;
868 default:
869 break;
870 }
871 }
872
873 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
874 struct ieee80211_vif *vif,
875 struct ieee80211_ampdu_params *params)
876 {
877 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
878 int ret;
879 bool tx_agg_ref = false;
880 struct ieee80211_sta *sta = params->sta;
881 enum ieee80211_ampdu_mlme_action action = params->action;
882 u16 tid = params->tid;
883 u16 *ssn = &params->ssn;
884 u8 buf_size = params->buf_size;
885 bool amsdu = params->amsdu;
886 u16 timeout = params->timeout;
887
888 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
889 sta->addr, tid, action);
890
891 if (!(mvm->nvm_data->sku_cap_11n_enable))
892 return -EACCES;
893
894 /* return from D0i3 before starting a new Tx aggregation */
895 switch (action) {
896 case IEEE80211_AMPDU_TX_START:
897 case IEEE80211_AMPDU_TX_STOP_CONT:
898 case IEEE80211_AMPDU_TX_STOP_FLUSH:
899 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
900 case IEEE80211_AMPDU_TX_OPERATIONAL:
901 /*
902 * for tx start, wait synchronously until D0i3 exit to
903 * get the correct sequence number for the tid.
904 * additionally, some other ampdu actions use direct
905 * target access, which is not handled automatically
906 * by the trans layer (unlike commands), so wait for
907 * d0i3 exit in these cases as well.
908 */
909 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
910 if (ret)
911 return ret;
912
913 tx_agg_ref = true;
914 break;
915 default:
916 break;
917 }
918
919 mutex_lock(&mvm->mutex);
920
921 switch (action) {
922 case IEEE80211_AMPDU_RX_START:
923 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
924 ret = -EINVAL;
925 break;
926 }
927 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
928 timeout);
929 break;
930 case IEEE80211_AMPDU_RX_STOP:
931 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
932 timeout);
933 break;
934 case IEEE80211_AMPDU_TX_START:
935 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
936 ret = -EINVAL;
937 break;
938 }
939 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
940 break;
941 case IEEE80211_AMPDU_TX_STOP_CONT:
942 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
943 break;
944 case IEEE80211_AMPDU_TX_STOP_FLUSH:
945 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
946 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
947 break;
948 case IEEE80211_AMPDU_TX_OPERATIONAL:
949 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
950 buf_size, amsdu);
951 break;
952 default:
953 WARN_ON_ONCE(1);
954 ret = -EINVAL;
955 break;
956 }
957
958 if (!ret) {
959 u16 rx_ba_ssn = 0;
960
961 if (action == IEEE80211_AMPDU_RX_START)
962 rx_ba_ssn = *ssn;
963
964 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
965 rx_ba_ssn, action);
966 }
967 mutex_unlock(&mvm->mutex);
968
969 /*
970 * If the tid is marked as started, we won't use it for offloaded
971 * traffic on the next D0i3 entry. It's safe to unref.
972 */
973 if (tx_agg_ref)
974 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
975
976 return ret;
977 }
978
979 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
980 struct ieee80211_vif *vif)
981 {
982 struct iwl_mvm *mvm = data;
983 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
984
985 mvmvif->uploaded = false;
986 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
987
988 spin_lock_bh(&mvm->time_event_lock);
989 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
990 spin_unlock_bh(&mvm->time_event_lock);
991
992 mvmvif->phy_ctxt = NULL;
993 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
994 }
995
996 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
997 {
998 	/* Clear the D3 reconfig flag; it is only needed to avoid dumping a
999 	 * firmware coredump on reconfiguration - we shouldn't do that
1000 	 * on a D3->D0 transition.
1001 */
1002 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1003 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1004 iwl_mvm_fw_error_dump(mvm);
1005 }
1006
1007 /* cleanup all stale references (scan, roc), but keep the
1008 * ucode_down ref until reconfig is complete
1009 */
1010 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1011
1012 iwl_mvm_stop_device(mvm);
1013
1014 mvm->scan_status = 0;
1015 mvm->ps_disabled = false;
1016 mvm->calibrating = false;
1017
1018 /* just in case one was running */
1019 iwl_mvm_cleanup_roc_te(mvm);
1020 ieee80211_remain_on_channel_expired(mvm->hw);
1021
1022 /*
1023 * cleanup all interfaces, even inactive ones, as some might have
1024 * gone down during the HW restart
1025 */
1026 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1027
1028 mvm->p2p_device_vif = NULL;
1029 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1030
1031 iwl_mvm_reset_phy_ctxts(mvm);
1032 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1033 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1034 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1035 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1036 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1037 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1038
1039 ieee80211_wake_queues(mvm->hw);
1040
1041 /* clear any stale d0i3 state */
1042 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1043
1044 mvm->vif_count = 0;
1045 mvm->rx_ba_sessions = 0;
1046 mvm->fw_dbg_conf = FW_DBG_INVALID;
1047
1048 /* keep statistics ticking */
1049 iwl_mvm_accu_radio_stats(mvm);
1050 }
1051
1052 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1053 {
1054 int ret;
1055
1056 lockdep_assert_held(&mvm->mutex);
1057
1058 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1059 /* Clean up some internal and mac80211 state on restart */
1060 iwl_mvm_restart_cleanup(mvm);
1061 } else {
1062 /* Hold the reference to prevent runtime suspend while
1063 * the start procedure runs. It's a bit confusing
1064 * that the UCODE_DOWN reference is taken, but it just
1065 * means "UCODE is not UP yet". ( TODO: rename this
1066 * reference).
1067 */
1068 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1069 }
1070 ret = iwl_mvm_up(mvm);
1071
1072 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1073 /* Something went wrong - we need to finish some cleanup
1074 * that normally iwl_mvm_mac_restart_complete() below
1075 * would do.
1076 */
1077 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1078 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1079 }
1080
1081 return ret;
1082 }
1083
1084 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1085 {
1086 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1087 int ret;
1088
1089 /* Some hw restart cleanups must not hold the mutex */
1090 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1091 /*
1092 * Make sure we are out of d0i3. This is needed
1093 * to make sure the reference accounting is correct
1094 * (and there is no stale d0i3_exit_work).
1095 */
1096 wait_event_timeout(mvm->d0i3_exit_waitq,
1097 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1098 &mvm->status),
1099 HZ);
1100 }
1101
1102 mutex_lock(&mvm->mutex);
1103 ret = __iwl_mvm_mac_start(mvm);
1104 mutex_unlock(&mvm->mutex);
1105
1106 return ret;
1107 }
1108
1109 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1110 {
1111 int ret;
1112
1113 mutex_lock(&mvm->mutex);
1114
1115 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1116 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1117 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1118 if (ret)
1119 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1120 ret);
1121
1122 /* allow transport/FW low power modes */
1123 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1124
1125 /*
1126 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1127 * of packets the FW sent out, so we must reconnect.
1128 */
1129 iwl_mvm_teardown_tdls_peers(mvm);
1130
1131 mutex_unlock(&mvm->mutex);
1132 }
1133
1134 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1135 {
1136 if (iwl_mvm_is_d0i3_supported(mvm) &&
1137 iwl_mvm_enter_d0i3_on_suspend(mvm))
1138 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1139 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1140 &mvm->status),
1141 HZ),
1142 "D0i3 exit on resume timed out\n");
1143 }
1144
1145 static void
1146 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1147 enum ieee80211_reconfig_type reconfig_type)
1148 {
1149 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1150
1151 switch (reconfig_type) {
1152 case IEEE80211_RECONFIG_TYPE_RESTART:
1153 iwl_mvm_restart_complete(mvm);
1154 break;
1155 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1156 iwl_mvm_resume_complete(mvm);
1157 break;
1158 }
1159 }
1160
1161 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1162 {
1163 lockdep_assert_held(&mvm->mutex);
1164
1165 	/* The firmware counters are obviously reset now; to avoid tracking
1166 	 * only part of a period, also clear the accumulated radio statistics.
1167 */
1168 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1169
1170 /* async_handlers_wk is now blocked */
1171
1172 /*
1173 * The work item could be running or queued if the
1174 * ROC time event stops just as we get here.
1175 */
1176 flush_work(&mvm->roc_done_wk);
1177
1178 iwl_mvm_stop_device(mvm);
1179
1180 iwl_mvm_async_handlers_purge(mvm);
1181 /* async_handlers_list is empty and will stay empty: HW is stopped */
1182
1183 /* the fw is stopped, the aux sta is dead: clean up driver state */
1184 iwl_mvm_del_aux_sta(mvm);
1185
1186 iwl_free_fw_paging(mvm);
1187
1188 /*
1189 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1190 * won't be called in this case).
1191 	 * But make sure to clean up interfaces that went down before or while
1192 	 * the HW restart was requested.
1193 */
1194 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1195 ieee80211_iterate_interfaces(mvm->hw, 0,
1196 iwl_mvm_cleanup_iterator, mvm);
1197
1198 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1199 * make sure there's nothing left there and warn if any is found.
1200 */
1201 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1202 int i;
1203
1204 for (i = 0; i < mvm->max_scans; i++) {
1205 if (WARN_ONCE(mvm->scan_uid_status[i],
1206 "UMAC scan UID %d status was not cleaned\n",
1207 i))
1208 mvm->scan_uid_status[i] = 0;
1209 }
1210 }
1211 }
1212
1213 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1214 {
1215 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1216
1217 flush_work(&mvm->d0i3_exit_work);
1218 flush_work(&mvm->async_handlers_wk);
1219 flush_work(&mvm->add_stream_wk);
1220 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1221 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1222 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1223 iwl_mvm_free_fw_dump_desc(mvm);
1224
1225 mutex_lock(&mvm->mutex);
1226 __iwl_mvm_mac_stop(mvm);
1227 mutex_unlock(&mvm->mutex);
1228
1229 /*
1230 * The worker might have been waiting for the mutex, let it run and
1231 * discover that its list is now empty.
1232 */
1233 cancel_work_sync(&mvm->async_handlers_wk);
1234 }
1235
1236 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1237 {
1238 u16 i;
1239
1240 lockdep_assert_held(&mvm->mutex);
1241
1242 for (i = 0; i < NUM_PHY_CTX; i++)
1243 if (!mvm->phy_ctxts[i].ref)
1244 return &mvm->phy_ctxts[i];
1245
1246 IWL_ERR(mvm, "No available PHY context\n");
1247 return NULL;
1248 }
1249
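/*
 * Send a per-MAC TX power restriction to the firmware. mac80211 hands us
 * tx_power in dBm; the factor of 8 below suggests the firmware field is in
 * 1/8 dBm units, and a tx_power of IWL_DEFAULT_MAX_TX_POWER is replaced by
 * the device maximum (IWL_DEV_MAX_TX_POWER). The command length is trimmed
 * (v3 / v3.v2) to match older firmware APIs that lack the TX_POWER_ACK /
 * TX_POWER_CHAIN capabilities.
 */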
1250 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1251 s16 tx_power)
1252 {
1253 struct iwl_dev_tx_power_cmd cmd = {
1254 .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1255 .v3.v2.mac_context_id =
1256 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1257 .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1258 };
1259 int len = sizeof(cmd);
1260
1261 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1262 cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1263
1264 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1265 len = sizeof(cmd.v3);
1266 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1267 len = sizeof(cmd.v3.v2);
1268
1269 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1270 }
1271
1272 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1273 struct ieee80211_vif *vif)
1274 {
1275 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1276 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1277 int ret;
1278
1279 mvmvif->mvm = mvm;
1280
1281 /*
1282 * make sure D0i3 exit is completed, otherwise a target access
1283 * during tx queue configuration could be done when still in
1284 * D0i3 state.
1285 */
1286 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1287 if (ret)
1288 return ret;
1289
1290 /*
1291 * Not much to do here. The stack will not allow interface
1292 * types or combinations that we didn't advertise, so we
1293 * don't really have to check the types.
1294 */
1295
1296 mutex_lock(&mvm->mutex);
1297
1298 /* make sure that beacon statistics don't go backwards with FW reset */
1299 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1300 mvmvif->beacon_stats.accu_num_beacons +=
1301 mvmvif->beacon_stats.num_beacons;
1302
1303 /* Allocate resources for the MAC context, and add it to the fw */
1304 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1305 if (ret)
1306 goto out_unlock;
1307
1308 /* Counting number of interfaces is needed for legacy PM */
1309 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1310 mvm->vif_count++;
1311
1312 /*
1313 * The AP binding flow can be done only after the beacon
1314 * template is configured (which happens only in the mac80211
1315 * start_ap() flow), and adding the broadcast station can happen
1316 * only after the binding.
1317 * In addition, since modifying the MAC before adding a bcast
1318 * station is not allowed by the FW, delay the adding of MAC context to
1319 * the point where we can also add the bcast station.
1320 * In short: there's not much we can do at this point, other than
1321 * allocating resources :)
1322 */
1323 if (vif->type == NL80211_IFTYPE_AP ||
1324 vif->type == NL80211_IFTYPE_ADHOC) {
1325 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1326 if (ret) {
1327 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1328 goto out_release;
1329 }
1330
1331 iwl_mvm_vif_dbgfs_register(mvm, vif);
1332 goto out_unlock;
1333 }
1334
1335 mvmvif->features |= hw->netdev_features;
1336
1337 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1338 if (ret)
1339 goto out_release;
1340
1341 ret = iwl_mvm_power_update_mac(mvm);
1342 if (ret)
1343 goto out_remove_mac;
1344
1345 /* beacon filtering */
1346 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1347 if (ret)
1348 goto out_remove_mac;
1349
1350 if (!mvm->bf_allowed_vif &&
1351 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1352 mvm->bf_allowed_vif = mvmvif;
1353 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1354 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1355 }
1356
1357 /*
1358 * P2P_DEVICE interface does not have a channel context assigned to it,
1359 * so a dedicated PHY context is allocated to it and the corresponding
1360 * MAC context is bound to it at this stage.
1361 */
1362 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1363
1364 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1365 if (!mvmvif->phy_ctxt) {
1366 ret = -ENOSPC;
1367 goto out_free_bf;
1368 }
1369
1370 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1371 ret = iwl_mvm_binding_add_vif(mvm, vif);
1372 if (ret)
1373 goto out_unref_phy;
1374
1375 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1376 if (ret)
1377 goto out_unbind;
1378
1379 /* Save a pointer to p2p device vif, so it can later be used to
1380 * update the p2p device MAC when a GO is started/stopped */
1381 mvm->p2p_device_vif = vif;
1382 }
1383
1384 iwl_mvm_vif_dbgfs_register(mvm, vif);
1385 goto out_unlock;
1386
1387 out_unbind:
1388 iwl_mvm_binding_remove_vif(mvm, vif);
1389 out_unref_phy:
1390 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1391 out_free_bf:
1392 if (mvm->bf_allowed_vif == mvmvif) {
1393 mvm->bf_allowed_vif = NULL;
1394 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1395 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1396 }
1397 out_remove_mac:
1398 mvmvif->phy_ctxt = NULL;
1399 iwl_mvm_mac_ctxt_remove(mvm, vif);
1400 out_release:
1401 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1402 mvm->vif_count--;
1403
1404 iwl_mvm_mac_ctxt_release(mvm, vif);
1405 out_unlock:
1406 mutex_unlock(&mvm->mutex);
1407
1408 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1409
1410 return ret;
1411 }
1412
1413 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1414 struct ieee80211_vif *vif)
1415 {
1416 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1417
1418 if (tfd_msk) {
1419 /*
1420 * mac80211 first removes all the stations of the vif and
1421 * then removes the vif. When it removes a station it also
1422 * flushes the AMPDU session. So by now, all the AMPDU sessions
1423 * of all the stations of this vif are closed, and the queues
1424 * of these AMPDU sessions are properly closed.
1425 * We still need to take care of the shared queues of the vif.
1426 * Flush them here.
1427 */
1428 mutex_lock(&mvm->mutex);
1429 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1430 mutex_unlock(&mvm->mutex);
1431
1432 /*
1433 * There are transports that buffer a few frames in the host.
1434 * For these, the flush above isn't enough since while we were
1435 * flushing, the transport might have sent more frames to the
1436 * device. To solve this, wait here until the transport is
1437 * empty. Technically, this could have replaced the flush
1438 * above, but flush is much faster than draining. So flush
1439 * first, and drain to make sure we have no frames in the
1440 * transport anymore.
1441 * If a station still had frames on the shared queues, it is
1442 * already marked as draining, so to complete the draining, we
1443 * just need to wait until the transport is empty.
1444 */
1445 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1446 }
1447
1448 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1449 /*
1450 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1451 * We assume here that all the packets sent to the OFFCHANNEL
1452 * queue are sent in ROC session.
1453 */
1454 flush_work(&mvm->roc_done_wk);
1455 } else {
1456 /*
1457 * By now, all the AC queues are empty. The AGG queues are
1458 * empty too. We already got all the Tx responses for all the
1459 * packets in the queues. The drain work can have been
1460 * triggered. Flush it.
1461 */
1462 flush_work(&mvm->sta_drained_wk);
1463 }
1464 }
1465
1466 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1467 struct ieee80211_vif *vif)
1468 {
1469 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1470 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1471
1472 iwl_mvm_prepare_mac_removal(mvm, vif);
1473
1474 mutex_lock(&mvm->mutex);
1475
1476 if (mvm->bf_allowed_vif == mvmvif) {
1477 mvm->bf_allowed_vif = NULL;
1478 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1479 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1480 }
1481
1482 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1483
1484 /*
1485 	 * For AP/GO interfaces, the teardown of the resources allocated to the
1486 	 * interface is handled as part of the stop_ap flow.
1487 */
1488 if (vif->type == NL80211_IFTYPE_AP ||
1489 vif->type == NL80211_IFTYPE_ADHOC) {
1490 #ifdef CONFIG_NL80211_TESTMODE
1491 if (vif == mvm->noa_vif) {
1492 mvm->noa_vif = NULL;
1493 mvm->noa_duration = 0;
1494 }
1495 #endif
1496 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1497 goto out_release;
1498 }
1499
1500 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1501 mvm->p2p_device_vif = NULL;
1502 iwl_mvm_rm_bcast_sta(mvm, vif);
1503 iwl_mvm_binding_remove_vif(mvm, vif);
1504 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1505 mvmvif->phy_ctxt = NULL;
1506 }
1507
1508 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1509 mvm->vif_count--;
1510
1511 iwl_mvm_power_update_mac(mvm);
1512 iwl_mvm_mac_ctxt_remove(mvm, vif);
1513
1514 out_release:
1515 iwl_mvm_mac_ctxt_release(mvm, vif);
1516 mutex_unlock(&mvm->mutex);
1517 }
1518
1519 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1520 {
1521 return 0;
1522 }
1523
1524 struct iwl_mvm_mc_iter_data {
1525 struct iwl_mvm *mvm;
1526 int port_id;
1527 };
1528
1529 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1530 struct ieee80211_vif *vif)
1531 {
1532 struct iwl_mvm_mc_iter_data *data = _data;
1533 struct iwl_mvm *mvm = data->mvm;
1534 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1535 int ret, len;
1536
1537 /* if we don't have free ports, mcast frames will be dropped */
1538 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1539 return;
1540
1541 if (vif->type != NL80211_IFTYPE_STATION ||
1542 !vif->bss_conf.assoc)
1543 return;
1544
1545 cmd->port_id = data->port_id++;
1546 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1547 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1548
1549 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1550 if (ret)
1551 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1552 }
1553
1554 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1555 {
1556 struct iwl_mvm_mc_iter_data iter_data = {
1557 .mvm = mvm,
1558 };
1559
1560 lockdep_assert_held(&mvm->mutex);
1561
1562 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1563 return;
1564
1565 ieee80211_iterate_active_interfaces_atomic(
1566 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1567 iwl_mvm_mc_iface_iterator, &iter_data);
1568 }
1569
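/*
 * Build an MCAST_FILTER_CMD from mac80211's multicast address list (in
 * atomic context, hence GFP_ATOMIC) and return it cast to u64; mac80211
 * passes this value back as the 'multicast' argument of configure_filter(),
 * which takes ownership of the allocation. If the list is too long, or
 * IWL_MVM_FW_MCAST_FILTER_PASS_ALL is set, an empty pass_all command is
 * built instead.
 */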
1570 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1571 struct netdev_hw_addr_list *mc_list)
1572 {
1573 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1574 struct iwl_mcast_filter_cmd *cmd;
1575 struct netdev_hw_addr *addr;
1576 int addr_count;
1577 bool pass_all;
1578 int len;
1579
1580 addr_count = netdev_hw_addr_list_count(mc_list);
1581 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1582 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1583 if (pass_all)
1584 addr_count = 0;
1585
1586 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1587 cmd = kzalloc(len, GFP_ATOMIC);
1588 if (!cmd)
1589 return 0;
1590
1591 if (pass_all) {
1592 cmd->pass_all = 1;
1593 return (u64)(unsigned long)cmd;
1594 }
1595
1596 netdev_hw_addr_list_for_each(addr, mc_list) {
1597 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1598 cmd->count, addr->addr);
1599 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1600 addr->addr, ETH_ALEN);
1601 cmd->count++;
1602 }
1603
1604 return (u64)(unsigned long)cmd;
1605 }
1606
1607 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1608 unsigned int changed_flags,
1609 unsigned int *total_flags,
1610 u64 multicast)
1611 {
1612 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1613 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1614
1615 mutex_lock(&mvm->mutex);
1616
1617 /* replace previous configuration */
1618 kfree(mvm->mcast_filter_cmd);
1619 mvm->mcast_filter_cmd = cmd;
1620
1621 if (!cmd)
1622 goto out;
1623
1624 iwl_mvm_recalc_multicast(mvm);
1625 out:
1626 mutex_unlock(&mvm->mutex);
1627 *total_flags = 0;
1628 }
1629
1630 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1631 struct ieee80211_vif *vif,
1632 unsigned int filter_flags,
1633 unsigned int changed_flags)
1634 {
1635 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1636
1637 	/* We only support filtering of probe requests */
1638 if (!(changed_flags & FIF_PROBE_REQ))
1639 return;
1640
1641 /* Supported only for p2p client interfaces */
1642 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1643 !vif->p2p)
1644 return;
1645
1646 mutex_lock(&mvm->mutex);
1647 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1648 mutex_unlock(&mvm->mutex);
1649 }
1650
1651 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1652 struct iwl_bcast_iter_data {
1653 struct iwl_mvm *mvm;
1654 struct iwl_bcast_filter_cmd *cmd;
1655 u8 current_filter;
1656 };
1657
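/*
 * Copy a broadcast-filter template and resolve its "magic" attributes
 * (marked via the reserved1 field, see BC_FILTER_MAGIC_*) into per-vif
 * values: the vif's IP address for ARP filtering and the low bytes of the
 * vif's MAC address for DHCP filtering. reserved1 is cleared before the
 * filter is sent to the firmware.
 */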
1658 static void
1659 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1660 const struct iwl_fw_bcast_filter *in_filter,
1661 struct iwl_fw_bcast_filter *out_filter)
1662 {
1663 struct iwl_fw_bcast_filter_attr *attr;
1664 int i;
1665
1666 memcpy(out_filter, in_filter, sizeof(*out_filter));
1667
1668 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1669 attr = &out_filter->attrs[i];
1670
1671 if (!attr->mask)
1672 break;
1673
1674 switch (attr->reserved1) {
1675 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1676 if (vif->bss_conf.arp_addr_cnt != 1) {
1677 attr->mask = 0;
1678 continue;
1679 }
1680
1681 attr->val = vif->bss_conf.arp_addr_list[0];
1682 break;
1683 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1684 attr->val = *(__be32 *)&vif->addr[2];
1685 break;
1686 default:
1687 break;
1688 }
1689 attr->reserved1 = 0;
1690 out_filter->num_attrs++;
1691 }
1692 }
1693
1694 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1695 struct ieee80211_vif *vif)
1696 {
1697 struct iwl_bcast_iter_data *data = _data;
1698 struct iwl_mvm *mvm = data->mvm;
1699 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1700 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1701 struct iwl_fw_bcast_mac *bcast_mac;
1702 int i;
1703
1704 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1705 return;
1706
1707 bcast_mac = &cmd->macs[mvmvif->id];
1708
1709 /*
1710 * enable filtering only for associated stations, but not for P2P
1711 * Clients
1712 */
1713 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1714 !vif->bss_conf.assoc)
1715 return;
1716
1717 bcast_mac->default_discard = 1;
1718
1719 /* copy all configured filters */
1720 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1721 /*
1722 		 * Make sure we don't exceed the filter limit.
1723 		 * If there is still a valid filter to be configured,
1724 		 * be on the safe side and just allow bcast for this mac.
1725 */
1726 if (WARN_ON_ONCE(data->current_filter >=
1727 ARRAY_SIZE(cmd->filters))) {
1728 bcast_mac->default_discard = 0;
1729 bcast_mac->attached_filters = 0;
1730 break;
1731 }
1732
1733 iwl_mvm_set_bcast_filter(vif,
1734 &mvm->bcast_filters[i],
1735 &cmd->filters[data->current_filter]);
1736
1737 /* skip current filter if it contains no attributes */
1738 if (!cmd->filters[data->current_filter].num_attrs)
1739 continue;
1740
1741 /* attach the filter to current mac */
1742 bcast_mac->attached_filters |=
1743 cpu_to_le16(BIT(data->current_filter));
1744
1745 data->current_filter++;
1746 }
1747 }
1748
1749 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1750 struct iwl_bcast_filter_cmd *cmd)
1751 {
1752 struct iwl_bcast_iter_data iter_data = {
1753 .mvm = mvm,
1754 .cmd = cmd,
1755 };
1756
1757 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1758 return false;
1759
1760 memset(cmd, 0, sizeof(*cmd));
1761 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1762 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1763
1764 #ifdef CONFIG_IWLWIFI_DEBUGFS
1765 /* use debugfs filters/macs if override is configured */
1766 if (mvm->dbgfs_bcast_filtering.override) {
1767 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1768 sizeof(cmd->filters));
1769 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1770 sizeof(cmd->macs));
1771 return true;
1772 }
1773 #endif
1774
1775 /* if no filters are configured, do nothing */
1776 if (!mvm->bcast_filters)
1777 return false;
1778
1779 /* configure and attach these filters for each associated sta vif */
1780 ieee80211_iterate_active_interfaces(
1781 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1782 iwl_mvm_bcast_filter_iterator, &iter_data);
1783
1784 return true;
1785 }
1786
1787 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1788 {
1789 struct iwl_bcast_filter_cmd cmd;
1790
1791 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1792 return 0;
1793
1794 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1795 return 0;
1796
1797 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1798 sizeof(cmd), &cmd);
1799 }
1800 #else
1801 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1802 {
1803 return 0;
1804 }
1805 #endif
1806
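/*
 * Push the VHT MU-MIMO group membership and user position bitmaps from
 * bss_conf to the firmware via UPDATE_MU_GROUPS_CMD.
 */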
1807 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1808 struct ieee80211_vif *vif)
1809 {
1810 struct iwl_mu_group_mgmt_cmd cmd = {};
1811
1812 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1813 WLAN_MEMBERSHIP_LEN);
1814 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1815 WLAN_USER_POSITION_LEN);
1816
1817 return iwl_mvm_send_cmd_pdu(mvm,
1818 WIDE_ID(DATA_PATH_GROUP,
1819 UPDATE_MU_GROUPS_CMD),
1820 0, sizeof(cmd), &cmd);
1821 }
1822
1823 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1824 struct ieee80211_vif *vif)
1825 {
1826 if (vif->mu_mimo_owner) {
1827 struct iwl_mu_group_mgmt_notif *notif = _data;
1828
1829 /*
1830 * MU-MIMO Group Id action frame is little endian. We treat
1831 * the data received from firmware as if it came from the
1832 * action frame, so no conversion is needed.
1833 */
1834 ieee80211_update_mu_groups(vif,
1835 (u8 *)&notif->membership_status,
1836 (u8 *)&notif->user_position);
1837 }
1838 }
1839
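/*
 * MU group management notification from the firmware: forward the new
 * membership/position data to the vif that owns the MU-MIMO group.
 */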
1840 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1841 struct iwl_rx_cmd_buffer *rxb)
1842 {
1843 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1844 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1845
1846 ieee80211_iterate_active_interfaces_atomic(
1847 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1848 iwl_mvm_mu_mimo_iface_iterator, notif);
1849 }
1850
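/*
 * Handle bss_info changes on a station interface: MAC context and BSSID
 * updates, association and disassociation, beacon filtering, power,
 * CQM and broadcast filtering.
 */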
1851 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1852 struct ieee80211_vif *vif,
1853 struct ieee80211_bss_conf *bss_conf,
1854 u32 changes)
1855 {
1856 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1857 int ret;
1858
1859 /*
1860 * Re-calculate the tsf id, as the master-slave relations depend on the
1861 * beacon interval, which was not known when the station interface was
1862 * added.
1863 */
1864 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1865 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1866
1867 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1868 mvmvif->lqm_active)
1869 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1870 0, 0);
1871
1872 /*
1873 * If we're not associated yet, take the (new) BSSID before associating
1874 * so the firmware knows. If we're already associated, then use the old
1875 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
1876 * branch for disassociation below.
1877 */
1878 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
1879 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1880
1881 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
1882 if (ret)
1883 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1884
1885 /* after sending it once, adopt mac80211 data */
1886 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1887 mvmvif->associated = bss_conf->assoc;
1888
1889 if (changes & BSS_CHANGED_ASSOC) {
1890 if (bss_conf->assoc) {
1891 /* clear statistics to get clean beacon counter */
1892 iwl_mvm_request_statistics(mvm, true);
1893 memset(&mvmvif->beacon_stats, 0,
1894 sizeof(mvmvif->beacon_stats));
1895
1896 /* add quota for this interface */
1897 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1898 if (ret) {
1899 IWL_ERR(mvm, "failed to update quotas\n");
1900 return;
1901 }
1902
1903 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1904 &mvm->status)) {
1905 /*
1906 * If we're restarting then the firmware will
1907 * obviously have lost synchronisation with
1908 * the AP. It will attempt to synchronise by
1909 * itself, but we can make it more reliable by
1910 * scheduling a session protection time event.
1911 *
1912 * The firmware needs to receive a beacon to
1913 * catch up with synchronisation, so use 110% of
1914 * the beacon interval.
1915 *
1916 * Set a large maximum delay to allow for more
1917 * than a single interface.
1918 */
1919 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
1920 iwl_mvm_protect_session(mvm, vif, dur, dur,
1921 5 * dur, false);
1922 }
1923
1924 iwl_mvm_sf_update(mvm, vif, false);
1925 iwl_mvm_power_vif_assoc(mvm, vif);
1926 if (vif->p2p) {
1927 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
1928 iwl_mvm_update_smps(mvm, vif,
1929 IWL_MVM_SMPS_REQ_PROT,
1930 IEEE80211_SMPS_DYNAMIC);
1931 }
1932 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1933 /*
1934 * If the update fails, SF might be running in associated
1935 * mode while disassociated, which is forbidden.
1936 */
1937 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
1938 "Failed to update SF upon disassociation\n");
1939
1940 /* remove AP station now that the MAC is unassoc */
1941 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
1942 if (ret)
1943 IWL_ERR(mvm, "failed to remove AP station\n");
1944
1945 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1946 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1947 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1948 /* remove quota for this interface */
1949 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1950 if (ret)
1951 IWL_ERR(mvm, "failed to update quotas\n");
1952
1953 if (vif->p2p)
1954 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
1955
1956 /* this will take the cleared BSSID from bss_conf */
1957 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1958 if (ret)
1959 IWL_ERR(mvm,
1960 "failed to update MAC %pM (clear after unassoc)\n",
1961 vif->addr);
1962 }
1963
1964 /*
1965 * The firmware tracks the MU-MIMO group on its own.
1966 * However, on HW restart we should restore this data.
1967 */
1968 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1969 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1970 ret = iwl_mvm_update_mu_groups(mvm, vif);
1971 if (ret)
1972 IWL_ERR(mvm,
1973 "failed to update VHT MU_MIMO groups\n");
1974 }
1975
1976 iwl_mvm_recalc_multicast(mvm);
1977 iwl_mvm_configure_bcast_filter(mvm);
1978
1979 /* reset rssi values */
1980 mvmvif->bf_data.ave_beacon_signal = 0;
1981
1982 iwl_mvm_bt_coex_vif_change(mvm);
1983 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
1984 IEEE80211_SMPS_AUTOMATIC);
1985 if (fw_has_capa(&mvm->fw->ucode_capa,
1986 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1987 iwl_mvm_config_scan(mvm);
1988 } else if (changes & BSS_CHANGED_BEACON_INFO) {
1989 /*
1990 * We received a beacon _after_ association so
1991 * remove the session protection.
1992 */
1993 iwl_mvm_remove_time_event(mvm, mvmvif,
1994 &mvmvif->time_event_data);
1995 }
1996
1997 if (changes & BSS_CHANGED_BEACON_INFO) {
1998 iwl_mvm_sf_update(mvm, vif, false);
1999 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2000 }
2001
2002 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2003 /*
2004 * Send power command on every beacon change,
2005 * because we may have not enabled beacon abort yet.
2006 */
2007 BSS_CHANGED_BEACON_INFO)) {
2008 ret = iwl_mvm_power_update_mac(mvm);
2009 if (ret)
2010 IWL_ERR(mvm, "failed to update power mode\n");
2011 }
2012
2013 if (changes & BSS_CHANGED_TXPOWER) {
2014 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2015 bss_conf->txpower);
2016 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2017 }
2018
2019 if (changes & BSS_CHANGED_CQM) {
2020 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2021 /* reset cqm events tracking */
2022 mvmvif->bf_data.last_cqm_event = 0;
2023 if (mvmvif->bf_data.bf_enabled) {
2024 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2025 if (ret)
2026 IWL_ERR(mvm,
2027 "failed to update CQM thresholds\n");
2028 }
2029 }
2030
2031 if (changes & BSS_CHANGED_ARP_FILTER) {
2032 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2033 iwl_mvm_configure_bcast_filter(mvm);
2034 }
2035 }
2036
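/*
 * Start AP/IBSS operation: send the beacon template, add the MAC
 * context, bind it to a PHY context, add the broadcast station and
 * update power and quotas.
 */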
2037 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2038 struct ieee80211_vif *vif)
2039 {
2040 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2041 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2042 int ret;
2043
2044 /*
2045 * iwl_mvm_mac_ctxt_add() might read directly from the device
2046 * (the system time), so make sure it is available.
2047 */
2048 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2049 if (ret)
2050 return ret;
2051
2052 mutex_lock(&mvm->mutex);
2053
2054 /* Send the beacon template */
2055 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2056 if (ret)
2057 goto out_unlock;
2058
2059 /*
2060 * Re-calculate the tsf id, as the master-slave relations depend on the
2061 * beacon interval, which was not known when the AP interface was added.
2062 */
2063 if (vif->type == NL80211_IFTYPE_AP)
2064 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2065
2066 mvmvif->ap_assoc_sta_count = 0;
2067
2068 /* Add the mac context */
2069 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2070 if (ret)
2071 goto out_unlock;
2072
2073 /* Perform the binding */
2074 ret = iwl_mvm_binding_add_vif(mvm, vif);
2075 if (ret)
2076 goto out_remove;
2077
2078 /* Send the bcast station. At this stage the TBTT and DTIM time events
2079 * are added and applied to the scheduler */
2080 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2081 if (ret)
2082 goto out_unbind;
2083
2084 /* must be set before quota calculations */
2085 mvmvif->ap_ibss_active = true;
2086
2087 /* power update needs to be done before quotas */
2088 iwl_mvm_power_update_mac(mvm);
2089
2090 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2091 if (ret)
2092 goto out_quota_failed;
2093
2094 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2095 if (vif->p2p && mvm->p2p_device_vif)
2096 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2097
2098 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2099
2100 iwl_mvm_bt_coex_vif_change(mvm);
2101
2102 /* we don't support TDLS during DCM */
2103 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2104 iwl_mvm_teardown_tdls_peers(mvm);
2105
2106 goto out_unlock;
2107
2108 out_quota_failed:
2109 iwl_mvm_power_update_mac(mvm);
2110 mvmvif->ap_ibss_active = false;
2111 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2112 out_unbind:
2113 iwl_mvm_binding_remove_vif(mvm, vif);
2114 out_remove:
2115 iwl_mvm_mac_ctxt_remove(mvm, vif);
2116 out_unlock:
2117 mutex_unlock(&mvm->mutex);
2118 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2119 return ret;
2120 }
2121
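/*
 * Stop AP/IBSS operation and tear down everything set up in
 * iwl_mvm_start_ap_ibss(), including any CSA state that is still
 * pending.
 */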
2122 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2123 struct ieee80211_vif *vif)
2124 {
2125 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2126 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2127
2128 iwl_mvm_prepare_mac_removal(mvm, vif);
2129
2130 mutex_lock(&mvm->mutex);
2131
2132 /* Handle AP stop while in CSA */
2133 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2134 iwl_mvm_remove_time_event(mvm, mvmvif,
2135 &mvmvif->time_event_data);
2136 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2137 mvmvif->csa_countdown = false;
2138 }
2139
2140 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2141 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2142 mvm->csa_tx_block_bcn_timeout = 0;
2143 }
2144
2145 mvmvif->ap_ibss_active = false;
2146 mvm->ap_last_beacon_gp2 = 0;
2147
2148 iwl_mvm_bt_coex_vif_change(mvm);
2149
2150 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2151
2152 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2153 if (vif->p2p && mvm->p2p_device_vif)
2154 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2155
2156 iwl_mvm_update_quotas(mvm, false, NULL);
2157 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2158 iwl_mvm_binding_remove_vif(mvm, vif);
2159
2160 iwl_mvm_power_update_mac(mvm);
2161
2162 iwl_mvm_mac_ctxt_remove(mvm, vif);
2163
2164 mutex_unlock(&mvm->mutex);
2165 }
2166
2167 static void
2168 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2169 struct ieee80211_vif *vif,
2170 struct ieee80211_bss_conf *bss_conf,
2171 u32 changes)
2172 {
2173 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2174
2175 /* Changes will be applied when the AP/IBSS is started */
2176 if (!mvmvif->ap_ibss_active)
2177 return;
2178
2179 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2180 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2181 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2182 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2183
2184 /* Need to send a new beacon template to the FW */
2185 if (changes & BSS_CHANGED_BEACON &&
2186 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2187 IWL_WARN(mvm, "Failed updating beacon data\n");
2188
2189 if (changes & BSS_CHANGED_TXPOWER) {
2190 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2191 bss_conf->txpower);
2192 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2193 }
2194 }
2195
2196 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2197 struct ieee80211_vif *vif,
2198 struct ieee80211_bss_conf *bss_conf,
2199 u32 changes)
2200 {
2201 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2202
2203 /*
2204 * iwl_mvm_bss_info_changed_station() might call
2205 * iwl_mvm_protect_session(), which reads directly from
2206 * the device (the system time), so make sure it is available.
2207 */
2208 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2209 return;
2210
2211 mutex_lock(&mvm->mutex);
2212
2213 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2214 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2215
2216 switch (vif->type) {
2217 case NL80211_IFTYPE_STATION:
2218 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2219 break;
2220 case NL80211_IFTYPE_AP:
2221 case NL80211_IFTYPE_ADHOC:
2222 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2223 break;
2224 default:
2225 /* shouldn't happen */
2226 WARN_ON_ONCE(1);
2227 }
2228
2229 mutex_unlock(&mvm->mutex);
2230 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2231 }
2232
2233 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2234 struct ieee80211_vif *vif,
2235 struct ieee80211_scan_request *hw_req)
2236 {
2237 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2238 int ret;
2239
2240 if (hw_req->req.n_channels == 0 ||
2241 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2242 return -EINVAL;
2243
2244 mutex_lock(&mvm->mutex);
2245 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2246 mutex_unlock(&mvm->mutex);
2247
2248 return ret;
2249 }
2250
2251 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2252 struct ieee80211_vif *vif)
2253 {
2254 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2255
2256 mutex_lock(&mvm->mutex);
2257
2258 /* Due to a race condition, it's possible that mac80211 asks
2259 * us to stop a hw_scan when it's already stopped. This can
2260 * happen, for instance, if we stopped the scan ourselves,
2261 * called ieee80211_scan_completed() and the userspace called
2262 * cancel scan before ieee80211_scan_work() could run.
2263 * To handle that, simply return if the scan is not running.
2264 */
2265 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2266 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2267
2268 mutex_unlock(&mvm->mutex);
2269 }
2270
2271 static void
2272 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2273 struct ieee80211_sta *sta, u16 tids,
2274 int num_frames,
2275 enum ieee80211_frame_release_type reason,
2276 bool more_data)
2277 {
2278 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2279
2280 /* Called when we need to transmit (a) frame(s) from mac80211 */
2281
2282 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2283 tids, more_data, false);
2284 }
2285
2286 static void
2287 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2288 struct ieee80211_sta *sta, u16 tids,
2289 int num_frames,
2290 enum ieee80211_frame_release_type reason,
2291 bool more_data)
2292 {
2293 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2294
2295 /* Called when we need to transmit (a) frame(s) from agg queue */
2296
2297 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2298 tids, more_data, true);
2299 }
2300
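/*
 * PS notifications for a station: while it sleeps, mark its TIDs as
 * buffered and freeze the TX queue timers; when it wakes up, unfreeze
 * the timers and update the firmware.
 */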
2301 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2302 struct ieee80211_vif *vif,
2303 enum sta_notify_cmd cmd,
2304 struct ieee80211_sta *sta)
2305 {
2306 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2307 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2308 unsigned long txqs = 0, tids = 0;
2309 int tid;
2310
2311 spin_lock_bh(&mvmsta->lock);
2312 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2313 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2314
2315 if (tid_data->state != IWL_AGG_ON &&
2316 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2317 continue;
2318
2319 __set_bit(tid_data->txq_id, &txqs);
2320
2321 if (iwl_mvm_tid_queued(tid_data) == 0)
2322 continue;
2323
2324 __set_bit(tid, &tids);
2325 }
2326
2327 switch (cmd) {
2328 case STA_NOTIFY_SLEEP:
2329 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2330 ieee80211_sta_block_awake(hw, sta, true);
2331
2332 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2333 ieee80211_sta_set_buffered(sta, tid, true);
2334
2335 if (txqs)
2336 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2337 /*
2338 * The fw marks the STA as asleep. Tx packets on the Tx
2339 * queues to this station will not be transmitted. The fw will
2340 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2341 */
2342 break;
2343 case STA_NOTIFY_AWAKE:
2344 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2345 break;
2346
2347 if (txqs)
2348 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2349 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2350 break;
2351 default:
2352 break;
2353 }
2354 spin_unlock_bh(&mvmsta->lock);
2355 }
2356
2357 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2358 struct ieee80211_vif *vif,
2359 struct ieee80211_sta *sta)
2360 {
2361 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2362 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2363
2364 /*
2365 * This is called before mac80211 does RCU synchronisation,
2366 * so here we already invalidate our internal RCU-protected
2367 * station pointer. The rest of the code will thus no longer
2368 * be able to find the station this way, and we don't rely
2369 * on further RCU synchronisation after the sta_state()
2370 * callback deleted the station.
2371 */
2372 mutex_lock(&mvm->mutex);
2373 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2374 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2375 ERR_PTR(-ENOENT));
2376
2377 mutex_unlock(&mvm->mutex);
2378 }
2379
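/*
 * Decide whether U-APSD may be used on this vif, based on firmware
 * support, P2P client support and the uapsd_disable module parameter.
 */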
2380 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2381 const u8 *bssid)
2382 {
2383 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2384 return;
2385
2386 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2387 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2388 return;
2389 }
2390
2391 if (!vif->p2p &&
2392 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2393 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2394 return;
2395 }
2396
2397 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2398 }
2399
2400 static void
2401 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2402 struct ieee80211_vif *vif, u8 *peer_addr,
2403 enum nl80211_tdls_operation action)
2404 {
2405 struct iwl_fw_dbg_trigger_tlv *trig;
2406 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2407
2408 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2409 return;
2410
2411 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2412 tdls_trig = (void *)trig->data;
2413 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
2414 return;
2415
2416 if (!(tdls_trig->action_bitmap & BIT(action)))
2417 return;
2418
2419 if (tdls_trig->peer_mode &&
2420 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2421 return;
2422
2423 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
2424 "TDLS event occurred, peer %pM, action %d",
2425 peer_addr, action);
2426 }
2427
2428 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2429 struct iwl_mvm_sta *mvm_sta)
2430 {
2431 struct iwl_mvm_tid_data *tid_data;
2432 struct sk_buff *skb;
2433 int i;
2434
2435 spin_lock_bh(&mvm_sta->lock);
2436 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2437 tid_data = &mvm_sta->tid_data[i];
2438 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2439 ieee80211_free_txskb(mvm->hw, skb);
2440 }
2441 spin_unlock_bh(&mvm_sta->lock);
2442 }
2443
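/*
 * mac80211 sta_state callback: add, update and remove stations in the
 * firmware as they move between the NOTEXIST/NONE/AUTH/ASSOC/AUTHORIZED
 * states, and handle the TDLS and beacon filtering side effects of each
 * transition.
 */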
2444 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2445 struct ieee80211_vif *vif,
2446 struct ieee80211_sta *sta,
2447 enum ieee80211_sta_state old_state,
2448 enum ieee80211_sta_state new_state)
2449 {
2450 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2451 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2452 int ret;
2453
2454 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2455 sta->addr, old_state, new_state);
2456
2457 /* this would be a mac80211 bug ... but don't crash */
2458 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2459 return -EINVAL;
2460
2461 /* if a STA is being removed, reuse its ID */
2462 flush_work(&mvm->sta_drained_wk);
2463
2464 /*
2465 * If we are in a STA removal flow and in DQA mode:
2466 *
2467 * This is after the sync_rcu part, so the queues have already been
2468 * flushed. No more TXs on their way in mac80211's path, and no more in
2469 * the queues.
2470 * Also, we won't be getting any new TX frames for this station.
2471 * What we might have are deferred TX frames that need to be taken care
2472 * of.
2473 *
2474 * Drop any still-queued deferred-frame before removing the STA, and
2475 * make sure the worker is no longer handling frames for this STA.
2476 */
2477 if (old_state == IEEE80211_STA_NONE &&
2478 new_state == IEEE80211_STA_NOTEXIST &&
2479 iwl_mvm_is_dqa_supported(mvm)) {
2480 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2481
2482 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2483 flush_work(&mvm->add_stream_wk);
2484
2485 /*
2486 * No need to make sure deferred TX indication is off since the
2487 * worker will already remove it if it was on
2488 */
2489 }
2490
2491 mutex_lock(&mvm->mutex);
2492 if (old_state == IEEE80211_STA_NOTEXIST &&
2493 new_state == IEEE80211_STA_NONE) {
2494 /*
2495 * Firmware bug - it'll crash if the beacon interval is less
2496 * than 16. We can't avoid connecting at all, so refuse the
2497 * station state change; this will cause mac80211 to abandon
2498 * attempts to connect to this AP, and eventually wpa_s will
2499 * blacklist the AP...
2500 */
2501 if (vif->type == NL80211_IFTYPE_STATION &&
2502 vif->bss_conf.beacon_int < 16) {
2503 IWL_ERR(mvm,
2504 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2505 sta->addr, vif->bss_conf.beacon_int);
2506 ret = -EINVAL;
2507 goto out_unlock;
2508 }
2509
2510 if (sta->tdls &&
2511 (vif->p2p ||
2512 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2513 IWL_MVM_TDLS_STA_COUNT ||
2514 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2515 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2516 ret = -EBUSY;
2517 goto out_unlock;
2518 }
2519
2520 ret = iwl_mvm_add_sta(mvm, vif, sta);
2521 if (sta->tdls && ret == 0) {
2522 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2523 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2524 NL80211_TDLS_SETUP);
2525 }
2526 } else if (old_state == IEEE80211_STA_NONE &&
2527 new_state == IEEE80211_STA_AUTH) {
2528 /*
2529 * EBS may be disabled due to previous failures reported by FW.
2530 * Reset EBS status here assuming environment has been changed.
2531 */
2532 mvm->last_ebs_successful = true;
2533 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2534 ret = 0;
2535 } else if (old_state == IEEE80211_STA_AUTH &&
2536 new_state == IEEE80211_STA_ASSOC) {
2537 if (vif->type == NL80211_IFTYPE_AP) {
2538 mvmvif->ap_assoc_sta_count++;
2539 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2540 }
2541 ret = iwl_mvm_update_sta(mvm, vif, sta);
2542 if (ret == 0)
2543 iwl_mvm_rs_rate_init(mvm, sta,
2544 mvmvif->phy_ctxt->channel->band,
2545 true);
2546 } else if (old_state == IEEE80211_STA_ASSOC &&
2547 new_state == IEEE80211_STA_AUTHORIZED) {
2548
2549 /* we don't support TDLS during DCM */
2550 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2551 iwl_mvm_teardown_tdls_peers(mvm);
2552
2553 if (sta->tdls)
2554 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2555 NL80211_TDLS_ENABLE_LINK);
2556
2557 /* enable beacon filtering */
2558 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2559 ret = 0;
2560 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2561 new_state == IEEE80211_STA_ASSOC) {
2562 /* disable beacon filtering */
2563 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2564 ret = 0;
2565 } else if (old_state == IEEE80211_STA_ASSOC &&
2566 new_state == IEEE80211_STA_AUTH) {
2567 if (vif->type == NL80211_IFTYPE_AP) {
2568 mvmvif->ap_assoc_sta_count--;
2569 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2570 }
2571 ret = 0;
2572 } else if (old_state == IEEE80211_STA_AUTH &&
2573 new_state == IEEE80211_STA_NONE) {
2574 ret = 0;
2575 } else if (old_state == IEEE80211_STA_NONE &&
2576 new_state == IEEE80211_STA_NOTEXIST) {
2577 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2578 if (sta->tdls) {
2579 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2580 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2581 NL80211_TDLS_DISABLE_LINK);
2582 }
2583 } else {
2584 ret = -EIO;
2585 }
2586 out_unlock:
2587 mutex_unlock(&mvm->mutex);
2588
2589 if (sta->tdls && ret == 0) {
2590 if (old_state == IEEE80211_STA_NOTEXIST &&
2591 new_state == IEEE80211_STA_NONE)
2592 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2593 else if (old_state == IEEE80211_STA_NONE &&
2594 new_state == IEEE80211_STA_NOTEXIST)
2595 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2596 }
2597
2598 return ret;
2599 }
2600
2601 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2602 {
2603 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2604
2605 mvm->rts_threshold = value;
2606
2607 return 0;
2608 }
2609
2610 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2611 struct ieee80211_vif *vif,
2612 struct ieee80211_sta *sta, u32 changed)
2613 {
2614 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2615
2616 if (vif->type == NL80211_IFTYPE_STATION &&
2617 changed & IEEE80211_RC_NSS_CHANGED)
2618 iwl_mvm_sf_update(mvm, vif, false);
2619 }
2620
2621 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2622 struct ieee80211_vif *vif, u16 ac,
2623 const struct ieee80211_tx_queue_params *params)
2624 {
2625 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2626 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2627
2628 mvmvif->queue_params[ac] = *params;
2629
2630 /*
2631 * No need to update right away, we'll get BSS_CHANGED_QOS.
2632 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2633 */
2634 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2635 int ret;
2636
2637 mutex_lock(&mvm->mutex);
2638 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2639 mutex_unlock(&mvm->mutex);
2640 return ret;
2641 }
2642 return 0;
2643 }
2644
2645 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2646 struct ieee80211_vif *vif)
2647 {
2648 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2649 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
2650 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
2651
2652 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2653 return;
2654
2655 /*
2656 * iwl_mvm_protect_session() reads directly from the device
2657 * (the system time), so make sure it is available.
2658 */
2659 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2660 return;
2661
2662 mutex_lock(&mvm->mutex);
2663 /* Try really hard to protect the session and hear a beacon */
2664 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2665 mutex_unlock(&mvm->mutex);
2666
2667 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2668 }
2669
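/* Start a scheduled scan; refuse if the interface is not idle. */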
2670 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2671 struct ieee80211_vif *vif,
2672 struct cfg80211_sched_scan_request *req,
2673 struct ieee80211_scan_ies *ies)
2674 {
2675 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2676
2677 int ret;
2678
2679 mutex_lock(&mvm->mutex);
2680
2681 if (!vif->bss_conf.idle) {
2682 ret = -EBUSY;
2683 goto out;
2684 }
2685
2686 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2687
2688 out:
2689 mutex_unlock(&mvm->mutex);
2690 return ret;
2691 }
2692
2693 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2694 struct ieee80211_vif *vif)
2695 {
2696 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2697 int ret;
2698
2699 mutex_lock(&mvm->mutex);
2700
2701 /* Due to a race condition, it's possible that mac80211 asks
2702 * us to stop a sched_scan when it's already stopped. This
2703 * can happen, for instance, if we stopped the scan ourselves,
2704 * called ieee80211_sched_scan_stopped() and the userspace called
2705 * stop sched scan before ieee80211_sched_scan_stopped_work()
2706 * could run. To handle this, simply return if the scan is
2707 * not running.
2708 */
2709 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2710 mutex_unlock(&mvm->mutex);
2711 return 0;
2712 }
2713
2714 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2715 mutex_unlock(&mvm->mutex);
2716 iwl_mvm_wait_for_async_handlers(mvm);
2717
2718 return ret;
2719 }
2720
2721 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2722 enum set_key_cmd cmd,
2723 struct ieee80211_vif *vif,
2724 struct ieee80211_sta *sta,
2725 struct ieee80211_key_conf *key)
2726 {
2727 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2728 struct iwl_mvm_sta *mvmsta;
2729 struct iwl_mvm_key_pn *ptk_pn;
2730 int keyidx = key->keyidx;
2731 int ret;
2732 u8 key_offset;
2733
2734 if (iwlwifi_mod_params.sw_crypto) {
2735 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2736 return -EOPNOTSUPP;
2737 }
2738
2739 switch (key->cipher) {
2740 case WLAN_CIPHER_SUITE_TKIP:
2741 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2742 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2743 break;
2744 case WLAN_CIPHER_SUITE_CCMP:
2745 case WLAN_CIPHER_SUITE_GCMP:
2746 case WLAN_CIPHER_SUITE_GCMP_256:
2747 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2748 break;
2749 case WLAN_CIPHER_SUITE_AES_CMAC:
2750 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2751 break;
2752 case WLAN_CIPHER_SUITE_WEP40:
2753 case WLAN_CIPHER_SUITE_WEP104:
2754 /* For non-client mode, only use WEP keys for TX as we probably
2755 * don't have a station yet anyway and would then have to keep
2756 * track of the keys, linking them to each of the clients/peers
2757 * as they appear. For now, don't do that, for performance WEP
2758 * offload doesn't really matter much, but we need it for some
2759 * other offload features in client mode.
2760 */
2761 if (vif->type != NL80211_IFTYPE_STATION)
2762 return 0;
2763 break;
2764 default:
2765 /* currently FW supports only one optional cipher scheme */
2766 if (hw->n_cipher_schemes &&
2767 hw->cipher_schemes->cipher == key->cipher)
2768 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2769 else
2770 return -EOPNOTSUPP;
2771 }
2772
2773 mutex_lock(&mvm->mutex);
2774
2775 switch (cmd) {
2776 case SET_KEY:
2777 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2778 vif->type == NL80211_IFTYPE_AP) && !sta) {
2779 /*
2780 * GTK on AP interface is a TX-only key, return 0;
2781 * on IBSS they're per-station and because we're lazy
2782 * we don't support them for RX, so do the same.
2783 * CMAC in AP/IBSS modes must be done in software.
2784 */
2785 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
2786 ret = -EOPNOTSUPP;
2787 else
2788 ret = 0;
2789 key->hw_key_idx = STA_KEY_IDX_INVALID;
2790 break;
2791 }
2792
2793 /* During FW restart, in order to restore the state as it was,
2794 * don't try to reprogram keys we previously failed for.
2795 */
2796 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2797 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2798 IWL_DEBUG_MAC80211(mvm,
2799 "skip invalid idx key programming during restart\n");
2800 ret = 0;
2801 break;
2802 }
2803
2804 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2805 sta && iwl_mvm_has_new_rx_api(mvm) &&
2806 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2807 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2808 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2809 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2810 struct ieee80211_key_seq seq;
2811 int tid, q;
2812
2813 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2814 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
2815 ptk_pn = kzalloc(sizeof(*ptk_pn) +
2816 mvm->trans->num_rx_queues *
2817 sizeof(ptk_pn->q[0]),
2818 GFP_KERNEL);
2819 if (!ptk_pn) {
2820 ret = -ENOMEM;
2821 break;
2822 }
2823
2824 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2825 ieee80211_get_key_rx_seq(key, tid, &seq);
2826 for (q = 0; q < mvm->trans->num_rx_queues; q++)
2827 memcpy(ptk_pn->q[q].pn[tid],
2828 seq.ccmp.pn,
2829 IEEE80211_CCMP_PN_LEN);
2830 }
2831
2832 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
2833 }
2834
2835 /* in HW restart reuse the index, otherwise request a new one */
2836 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2837 key_offset = key->hw_key_idx;
2838 else
2839 key_offset = STA_KEY_IDX_INVALID;
2840
2841 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2842 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
2843 if (ret) {
2844 IWL_WARN(mvm, "set key failed\n");
2845 /*
2846 * can't add key for RX, but we don't need it
2847 * in the device for TX so still return 0
2848 */
2849 key->hw_key_idx = STA_KEY_IDX_INVALID;
2850 ret = 0;
2851 }
2852
2853 break;
2854 case DISABLE_KEY:
2855 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2856 ret = 0;
2857 break;
2858 }
2859
2860 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
2861 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2862 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2863 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2864 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2865 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2866 ptk_pn = rcu_dereference_protected(
2867 mvmsta->ptk_pn[keyidx],
2868 lockdep_is_held(&mvm->mutex));
2869 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
2870 if (ptk_pn)
2871 kfree_rcu(ptk_pn, rcu_head);
2872 }
2873
2874 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2875 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2876 break;
2877 default:
2878 ret = -EINVAL;
2879 }
2880
2881 mutex_unlock(&mvm->mutex);
2882 return ret;
2883 }
2884
2885 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2886 struct ieee80211_vif *vif,
2887 struct ieee80211_key_conf *keyconf,
2888 struct ieee80211_sta *sta,
2889 u32 iv32, u16 *phase1key)
2890 {
2891 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2892
2893 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2894 return;
2895
2896 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2897 }
2898
2899
2900 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2901 struct iwl_rx_packet *pkt, void *data)
2902 {
2903 struct iwl_mvm *mvm =
2904 container_of(notif_wait, struct iwl_mvm, notif_wait);
2905 struct iwl_hs20_roc_res *resp;
2906 int resp_len = iwl_rx_packet_payload_len(pkt);
2907 struct iwl_mvm_time_event_data *te_data = data;
2908
2909 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
2910 return true;
2911
2912 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2913 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
2914 return true;
2915 }
2916
2917 resp = (void *)pkt->data;
2918
2919 IWL_DEBUG_TE(mvm,
2920 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
2921 resp->status, resp->event_unique_id);
2922
2923 te_data->uid = le32_to_cpu(resp->event_unique_id);
2924 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
2925 te_data->uid);
2926
2927 spin_lock_bh(&mvm->time_event_lock);
2928 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
2929 spin_unlock_bh(&mvm->time_event_lock);
2930
2931 return true;
2932 }
2933
2934 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
2935 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
2936 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
2937 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
2938 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
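/*
 * Remain-on-channel on the AUX station (used for HS2.0 on station
 * interfaces): send HOT_SPOT_CMD and use a notification wait to pick
 * the time event UID out of the command response.
 */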
2939 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2940 struct ieee80211_channel *channel,
2941 struct ieee80211_vif *vif,
2942 int duration)
2943 {
2944 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
2945 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2946 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
2947 static const u16 time_event_response[] = { HOT_SPOT_CMD };
2948 struct iwl_notification_wait wait_time_event;
2949 u32 dtim_interval = vif->bss_conf.dtim_period *
2950 vif->bss_conf.beacon_int;
2951 u32 req_dur, delay;
2952 struct iwl_hs20_roc_req aux_roc_req = {
2953 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
2954 .id_and_color =
2955 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
2956 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
2957 /* Set the channel info data */
2958 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
2959 PHY_BAND_24 : PHY_BAND_5,
2960 .channel_info.channel = channel->hw_value,
2961 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
2962 /* Set the time and duration */
2963 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
2964 };
2965
2966 delay = AUX_ROC_MIN_DELAY;
2967 req_dur = MSEC_TO_TU(duration);
2968
2969 /*
2970 * If we are associated we want the delay time to be at least one
2971 * dtim interval so that the FW can wait until after the DTIM and
2972 * then start the time event; this will potentially allow us to
2973 * remain off-channel for the max duration.
2974 * Since we want to use almost a whole dtim interval we would also
2975 * like the delay to be for 2-3 dtim intervals, in case there are
2976 * other time events with higher priority.
2977 */
2978 if (vif->bss_conf.assoc) {
2979 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
2980 /* We cannot remain off-channel longer than the DTIM interval */
2981 if (dtim_interval <= req_dur) {
2982 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
2983 if (req_dur <= AUX_ROC_MIN_DURATION)
2984 req_dur = dtim_interval -
2985 AUX_ROC_MIN_SAFETY_BUFFER;
2986 }
2987 }
2988
2989 aux_roc_req.duration = cpu_to_le32(req_dur);
2990 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
2991
2992 IWL_DEBUG_TE(mvm,
2993 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
2994 channel->hw_value, req_dur, duration, delay,
2995 dtim_interval);
2996 /* Set the node address */
2997 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
2998
2999 lockdep_assert_held(&mvm->mutex);
3000
3001 spin_lock_bh(&mvm->time_event_lock);
3002
3003 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3004 spin_unlock_bh(&mvm->time_event_lock);
3005 return -EIO;
3006 }
3007
3008 te_data->vif = vif;
3009 te_data->duration = duration;
3010 te_data->id = HOT_SPOT_CMD;
3011
3012 spin_unlock_bh(&mvm->time_event_lock);
3013
3014 /*
3015 * Use a notification wait, which really just processes the
3016 * command response and doesn't wait for anything, in order
3017 * to be able to process the response and get the UID inside
3018 * the RX path. Using CMD_WANT_SKB doesn't work because it
3019 * stores the buffer and then wakes up this thread, by which
3020 * time another notification (that the time event started)
3021 * might already be processed unsuccessfully.
3022 */
3023 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3024 time_event_response,
3025 ARRAY_SIZE(time_event_response),
3026 iwl_mvm_rx_aux_roc, te_data);
3027
3028 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3029 &aux_roc_req);
3030
3031 if (res) {
3032 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3033 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3034 goto out_clear_te;
3035 }
3036
3037 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3038 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3039 /* should never fail */
3040 WARN_ON_ONCE(res);
3041
3042 if (res) {
3043 out_clear_te:
3044 spin_lock_bh(&mvm->time_event_lock);
3045 iwl_mvm_te_clear_data(mvm, te_data);
3046 spin_unlock_bh(&mvm->time_event_lock);
3047 }
3048
3049 return res;
3050 }
3051
3052 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3053 struct ieee80211_vif *vif,
3054 struct ieee80211_channel *channel,
3055 int duration,
3056 enum ieee80211_roc_type type)
3057 {
3058 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3059 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3060 struct cfg80211_chan_def chandef;
3061 struct iwl_mvm_phy_ctxt *phy_ctxt;
3062 int ret, i;
3063
3064 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3065 duration, type);
3066
3067 flush_work(&mvm->roc_done_wk);
3068
3069 mutex_lock(&mvm->mutex);
3070
3071 switch (vif->type) {
3072 case NL80211_IFTYPE_STATION:
3073 if (fw_has_capa(&mvm->fw->ucode_capa,
3074 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3075 /* Use aux roc framework (HS20) */
3076 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3077 vif, duration);
3078 goto out_unlock;
3079 }
3080 IWL_ERR(mvm, "hotspot not supported\n");
3081 ret = -EINVAL;
3082 goto out_unlock;
3083 case NL80211_IFTYPE_P2P_DEVICE:
3084 /* handle below */
3085 break;
3086 default:
3087 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3088 ret = -EINVAL;
3089 goto out_unlock;
3090 }
3091
3092 for (i = 0; i < NUM_PHY_CTX; i++) {
3093 phy_ctxt = &mvm->phy_ctxts[i];
3094 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3095 continue;
3096
3097 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3098 /*
3099 * Unbind the P2P_DEVICE from the current PHY context,
3100 * and if the PHY context is not used remove it.
3101 */
3102 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3103 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3104 goto out_unlock;
3105
3106 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3107
3108 /* Bind the P2P_DEVICE to the current PHY Context */
3109 mvmvif->phy_ctxt = phy_ctxt;
3110
3111 ret = iwl_mvm_binding_add_vif(mvm, vif);
3112 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3113 goto out_unlock;
3114
3115 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3116 goto schedule_time_event;
3117 }
3118 }
3119
3120 /* Need to update the PHY context only if the ROC channel changed */
3121 if (channel == mvmvif->phy_ctxt->channel)
3122 goto schedule_time_event;
3123
3124 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3125
3126 /*
3127 * Change the PHY context configuration as it is currently referenced
3128 * only by the P2P Device MAC
3129 */
3130 if (mvmvif->phy_ctxt->ref == 1) {
3131 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3132 &chandef, 1, 1);
3133 if (ret)
3134 goto out_unlock;
3135 } else {
3136 /*
3137 * The PHY context is shared with other MACs. Need to remove the
3138 * P2P Device from the binding, allocate a new PHY context and
3139 * create a new binding
3140 */
3141 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3142 if (!phy_ctxt) {
3143 ret = -ENOSPC;
3144 goto out_unlock;
3145 }
3146
3147 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3148 1, 1);
3149 if (ret) {
3150 IWL_ERR(mvm, "Failed to change PHY context\n");
3151 goto out_unlock;
3152 }
3153
3154 /* Unbind the P2P_DEVICE from the current PHY context */
3155 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3156 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3157 goto out_unlock;
3158
3159 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3160
3161 /* Bind the P2P_DEVICE to the new allocated PHY context */
3162 mvmvif->phy_ctxt = phy_ctxt;
3163
3164 ret = iwl_mvm_binding_add_vif(mvm, vif);
3165 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3166 goto out_unlock;
3167
3168 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3169 }
3170
3171 schedule_time_event:
3172 /* Schedule the time events */
3173 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3174
3175 out_unlock:
3176 mutex_unlock(&mvm->mutex);
3177 IWL_DEBUG_MAC80211(mvm, "leave\n");
3178 return ret;
3179 }
3180
3181 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3182 {
3183 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3184
3185 IWL_DEBUG_MAC80211(mvm, "enter\n");
3186
3187 mutex_lock(&mvm->mutex);
3188 iwl_mvm_stop_roc(mvm);
3189 mutex_unlock(&mvm->mutex);
3190
3191 IWL_DEBUG_MAC80211(mvm, "leave\n");
3192 return 0;
3193 }
3194
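/*
 * Allocate a free PHY context for the new channel context, configure it
 * and store its id in the chanctx driver data.
 */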
3195 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3196 struct ieee80211_chanctx_conf *ctx)
3197 {
3198 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3199 struct iwl_mvm_phy_ctxt *phy_ctxt;
3200 int ret;
3201
3202 lockdep_assert_held(&mvm->mutex);
3203
3204 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3205
3206 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3207 if (!phy_ctxt) {
3208 ret = -ENOSPC;
3209 goto out;
3210 }
3211
3212 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3213 ctx->rx_chains_static,
3214 ctx->rx_chains_dynamic);
3215 if (ret) {
3216 IWL_ERR(mvm, "Failed to add PHY context\n");
3217 goto out;
3218 }
3219
3220 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3221 *phy_ctxt_id = phy_ctxt->id;
3222 out:
3223 return ret;
3224 }
3225
3226 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3227 struct ieee80211_chanctx_conf *ctx)
3228 {
3229 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3230 int ret;
3231
3232 mutex_lock(&mvm->mutex);
3233 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3234 mutex_unlock(&mvm->mutex);
3235
3236 return ret;
3237 }
3238
3239 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3240 struct ieee80211_chanctx_conf *ctx)
3241 {
3242 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3243 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3244
3245 lockdep_assert_held(&mvm->mutex);
3246
3247 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3248 }
3249
3250 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3251 struct ieee80211_chanctx_conf *ctx)
3252 {
3253 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3254
3255 mutex_lock(&mvm->mutex);
3256 __iwl_mvm_remove_chanctx(mvm, ctx);
3257 mutex_unlock(&mvm->mutex);
3258 }
3259
3260 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3261 struct ieee80211_chanctx_conf *ctx,
3262 u32 changed)
3263 {
3264 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3265 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3266 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3267
3268 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3269 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3270 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3271 IEEE80211_CHANCTX_CHANGE_RADAR |
3272 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3273 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3274 phy_ctxt->ref, changed))
3275 return;
3276
3277 mutex_lock(&mvm->mutex);
3278 iwl_mvm_bt_coex_vif_change(mvm);
3279 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3280 ctx->rx_chains_static,
3281 ctx->rx_chains_dynamic);
3282 mutex_unlock(&mvm->mutex);
3283 }
3284
3285 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3286 struct ieee80211_vif *vif,
3287 struct ieee80211_chanctx_conf *ctx,
3288 bool switching_chanctx)
3289 {
3290 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3291 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3293 int ret;
3294
3295 lockdep_assert_held(&mvm->mutex);
3296
3297 mvmvif->phy_ctxt = phy_ctxt;
3298
3299 switch (vif->type) {
3300 case NL80211_IFTYPE_AP:
3301 /* only needed if we're switching chanctx (i.e. during CSA) */
3302 if (switching_chanctx) {
3303 mvmvif->ap_ibss_active = true;
3304 break;
3305 }
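		/* fall through */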
3306 case NL80211_IFTYPE_ADHOC:
3307 /*
3308 * The AP binding flow is handled as part of the start_ap flow
3309 * (in bss_info_changed), similarly for IBSS.
3310 */
3311 ret = 0;
3312 goto out;
3313 case NL80211_IFTYPE_STATION:
3314 break;
3315 case NL80211_IFTYPE_MONITOR:
3316 /* always disable PS when a monitor interface is active */
3317 mvmvif->ps_disabled = true;
3318 break;
3319 default:
3320 ret = -EINVAL;
3321 goto out;
3322 }
3323
3324 ret = iwl_mvm_binding_add_vif(mvm, vif);
3325 if (ret)
3326 goto out;
3327
3328 /*
3329 * Power state must be updated before quotas,
3330 * otherwise fw will complain.
3331 */
3332 iwl_mvm_power_update_mac(mvm);
3333
3334 /* Setting the quota at this stage is only required for monitor
3335 * interfaces. For the other types, the bss_info changed flow
3336 * will handle quota settings.
3337 */
3338 if (vif->type == NL80211_IFTYPE_MONITOR) {
3339 mvmvif->monitor_active = true;
3340 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3341 if (ret)
3342 goto out_remove_binding;
3343
3344 ret = iwl_mvm_add_snif_sta(mvm, vif);
3345 if (ret)
3346 goto out_remove_binding;
3347
3348 }
3349
3350 /* Handle binding during CSA */
3351 if (vif->type == NL80211_IFTYPE_AP) {
3352 iwl_mvm_update_quotas(mvm, false, NULL);
3353 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3354 }
3355
3356 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3357 u32 duration = 2 * vif->bss_conf.beacon_int;
3358
3359 /* iwl_mvm_protect_session() reads directly from the
3360 * device (the system time), so make sure it is
3361 * available.
3362 */
3363 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3364 if (ret)
3365 goto out_remove_binding;
3366
3367 /* Protect the session to make sure we hear the first
3368 * beacon on the new channel.
3369 */
3370 iwl_mvm_protect_session(mvm, vif, duration, duration,
3371 vif->bss_conf.beacon_int / 2,
3372 true);
3373
3374 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3375
3376 iwl_mvm_update_quotas(mvm, false, NULL);
3377 }
3378
3379 goto out;
3380
3381 out_remove_binding:
3382 iwl_mvm_binding_remove_vif(mvm, vif);
3383 iwl_mvm_power_update_mac(mvm);
3384 out:
3385 if (ret)
3386 mvmvif->phy_ctxt = NULL;
3387 return ret;
3388 }
3389 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3390 struct ieee80211_vif *vif,
3391 struct ieee80211_chanctx_conf *ctx)
3392 {
3393 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3394 int ret;
3395
3396 mutex_lock(&mvm->mutex);
3397 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3398 mutex_unlock(&mvm->mutex);
3399
3400 return ret;
3401 }
3402
3403 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3404 struct ieee80211_vif *vif,
3405 struct ieee80211_chanctx_conf *ctx,
3406 bool switching_chanctx)
3407 {
3408 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3409 struct ieee80211_vif *disabled_vif = NULL;
3410
3411 lockdep_assert_held(&mvm->mutex);
3412
3413 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3414
3415 switch (vif->type) {
3416 case NL80211_IFTYPE_ADHOC:
3417 goto out;
3418 case NL80211_IFTYPE_MONITOR:
3419 mvmvif->monitor_active = false;
3420 mvmvif->ps_disabled = false;
3421 iwl_mvm_rm_snif_sta(mvm, vif);
3422 break;
3423 case NL80211_IFTYPE_AP:
3424 /* This part is triggered only during CSA */
3425 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3426 goto out;
3427
3428 mvmvif->csa_countdown = false;
3429
3430 /* Set CS bit on all the stations */
3431 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3432
3433 /* Save the blocked iface; the timeout is set on the next beacon */
3434 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3435
3436 mvmvif->ap_ibss_active = false;
3437 break;
3438 case NL80211_IFTYPE_STATION:
3439 if (!switching_chanctx)
3440 break;
3441
3442 disabled_vif = vif;
3443
3444 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3445 break;
3446 default:
3447 break;
3448 }
3449
3450 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3451 iwl_mvm_binding_remove_vif(mvm, vif);
3452
3453 out:
3454 mvmvif->phy_ctxt = NULL;
3455 iwl_mvm_power_update_mac(mvm);
3456 }
3457
3458 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3459 struct ieee80211_vif *vif,
3460 struct ieee80211_chanctx_conf *ctx)
3461 {
3462 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3463
3464 mutex_lock(&mvm->mutex);
3465 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3466 mutex_unlock(&mvm->mutex);
3467 }
3468
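/*
 * Swap-mode chanctx switch: remove the vif from the old context, drop
 * that context, add the new one and assign the vif to it. On failure,
 * try to restore the old context; if that fails too, restart the hw.
 */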
3469 static int
3470 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3471 struct ieee80211_vif_chanctx_switch *vifs)
3472 {
3473 int ret;
3474
3475 mutex_lock(&mvm->mutex);
3476 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3477 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3478
3479 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3480 if (ret) {
3481 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3482 goto out_reassign;
3483 }
3484
3485 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3486 true);
3487 if (ret) {
3488 IWL_ERR(mvm,
3489 "failed to assign new_ctx during channel switch\n");
3490 goto out_remove;
3491 }
3492
3493 /* we don't support TDLS during DCM - can be caused by channel switch */
3494 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3495 iwl_mvm_teardown_tdls_peers(mvm);
3496
3497 goto out;
3498
3499 out_remove:
3500 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3501
3502 out_reassign:
3503 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3504 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3505 goto out_restart;
3506 }
3507
3508 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3509 true)) {
3510 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3511 goto out_restart;
3512 }
3513
3514 goto out;
3515
3516 out_restart:
3517 /* things keep failing, better restart the hw */
3518 iwl_mvm_nic_restart(mvm, false);
3519
3520 out:
3521 mutex_unlock(&mvm->mutex);
3522
3523 return ret;
3524 }
3525
3526 static int
3527 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3528 struct ieee80211_vif_chanctx_switch *vifs)
3529 {
3530 int ret;
3531
3532 mutex_lock(&mvm->mutex);
3533 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3534
3535 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3536 true);
3537 if (ret) {
3538 IWL_ERR(mvm,
3539 "failed to assign new_ctx during channel switch\n");
3540 goto out_reassign;
3541 }
3542
3543 goto out;
3544
3545 out_reassign:
3546 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3547 true)) {
3548 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3549 goto out_restart;
3550 }
3551
3552 goto out;
3553
3554 out_restart:
3555 /* things keep failing, better restart the hw */
3556 iwl_mvm_nic_restart(mvm, false);
3557
3558 out:
3559 mutex_unlock(&mvm->mutex);
3560
3561 return ret;
3562 }
3563
3564 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3565 struct ieee80211_vif_chanctx_switch *vifs,
3566 int n_vifs,
3567 enum ieee80211_chanctx_switch_mode mode)
3568 {
3569 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3570 int ret;
3571
3572 /* we only support a single vif right now */
3573 if (n_vifs > 1)
3574 return -EOPNOTSUPP;
3575
3576 switch (mode) {
3577 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3578 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3579 break;
3580 case CHANCTX_SWMODE_REASSIGN_VIF:
3581 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3582 break;
3583 default:
3584 ret = -EOPNOTSUPP;
3585 break;
3586 }
3587
3588 return ret;
3589 }
3590
3591 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3592 struct ieee80211_sta *sta,
3593 bool set)
3594 {
3595 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3596 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3597
3598 if (!mvm_sta || !mvm_sta->vif) {
3599 IWL_ERR(mvm, "Station is not associated to a vif\n");
3600 return -EINVAL;
3601 }
3602
3603 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3604 }
3605
3606 #ifdef CONFIG_NL80211_TESTMODE
3607 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3608 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3609 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3610 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3611 };
3612
3613 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3614 struct ieee80211_vif *vif,
3615 void *data, int len)
3616 {
3617 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3618 int err;
3619 u32 noa_duration;
3620
3621 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3622 if (err)
3623 return err;
3624
3625 if (!tb[IWL_MVM_TM_ATTR_CMD])
3626 return -EINVAL;
3627
3628 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3629 case IWL_MVM_TM_CMD_SET_NOA:
3630 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3631 !vif->bss_conf.enable_beacon ||
3632 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3633 return -EINVAL;
3634
3635 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3636 if (noa_duration >= vif->bss_conf.beacon_int)
3637 return -EINVAL;
3638
3639 mvm->noa_duration = noa_duration;
3640 mvm->noa_vif = vif;
3641
3642 return iwl_mvm_update_quotas(mvm, false, NULL);
3643 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3644 /* must be associated client vif - ignore authorized */
3645 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3646 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3647 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3648 return -EINVAL;
3649
3650 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3651 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3652 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3653 }
3654
3655 return -EOPNOTSUPP;
3656 }
3657
3658 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3659 struct ieee80211_vif *vif,
3660 void *data, int len)
3661 {
3662 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3663 int err;
3664
3665 mutex_lock(&mvm->mutex);
3666 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3667 mutex_unlock(&mvm->mutex);
3668
3669 return err;
3670 }
3671 #endif
3672
3673 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3674 struct ieee80211_vif *vif,
3675 struct ieee80211_channel_switch *chsw)
3676 {
3677 /* By implementing this operation, we prevent mac80211 from
3678 * starting its own channel switch timer, so that we can call
3679 * ieee80211_chswitch_done() ourselves at the right time
3680 * (which is when the absence time event starts).
3681 */
3682
3683 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3684 "dummy channel switch op\n");
3685 }
3686
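/*
 * mac80211 .pre_channel_switch callback.
 * AP/GO: refuse if another CSA is already in progress, if TX is still
 * blocked from a previous switch or if the previous countdown didn't
 * finish; otherwise remember the vif and the target frequency.
 * Client: stop any running link-quality measurement, schedule an absence
 * time event slightly before the switch, mark the client absent if TX
 * must be blocked and disable beacon filtering.
 * In all cases power save is disabled and TDLS peers are torn down,
 * since we won't stay on the current channel.
 */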
3687 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3688 struct ieee80211_vif *vif,
3689 struct ieee80211_channel_switch *chsw)
3690 {
3691 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3692 struct ieee80211_vif *csa_vif;
3693 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3694 u32 apply_time;
3695 int ret;
3696
3697 mutex_lock(&mvm->mutex);
3698
3699 mvmvif->csa_failed = false;
3700
3701 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3702 chsw->chandef.center_freq1);
3703
3704 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3705
3706 switch (vif->type) {
3707 case NL80211_IFTYPE_AP:
3708 csa_vif =
3709 rcu_dereference_protected(mvm->csa_vif,
3710 lockdep_is_held(&mvm->mutex));
3711 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3712 "Another CSA is already in progress")) {
3713 ret = -EBUSY;
3714 goto out_unlock;
3715 }
3716
3717 /* TX is still blocked by the previous CSA; prevent a new CS meanwhile */
3718 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
3719 lockdep_is_held(&mvm->mutex))) {
3720 ret = -EBUSY;
3721 goto out_unlock;
3722 }
3723
3724 rcu_assign_pointer(mvm->csa_vif, vif);
3725
3726 if (WARN_ONCE(mvmvif->csa_countdown,
3727 "Previous CSA countdown didn't complete")) {
3728 ret = -EBUSY;
3729 goto out_unlock;
3730 }
3731
3732 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
3733
3734 break;
3735 case NL80211_IFTYPE_STATION:
3736 if (mvmvif->lqm_active)
3737 iwl_mvm_send_lqm_cmd(vif,
3738 LQM_CMD_OPERATION_STOP_MEASUREMENT,
3739 0, 0);
3740
3741 /* Schedule the time event to a bit before the beacon with
3742 * count 1, to make sure we're already on the new channel
3743 * when the GO/AP arrives.
3744 */
3745 apply_time = chsw->device_timestamp +
3746 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3747 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3748
3749 if (chsw->block_tx)
3750 iwl_mvm_csa_client_absent(mvm, vif);
3751
3752 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3753 apply_time);
3754 if (mvmvif->bf_data.bf_enabled) {
3755 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3756 if (ret)
3757 goto out_unlock;
3758 }
3759
3760 break;
3761 default:
3762 break;
3763 }
3764
3765 mvmvif->ps_disabled = true;
3766
3767 ret = iwl_mvm_power_update_ps(mvm);
3768 if (ret)
3769 goto out_unlock;
3770
3771 /* we won't be on this channel any longer */
3772 iwl_mvm_teardown_tdls_peers(mvm);
3773
3774 out_unlock:
3775 mutex_unlock(&mvm->mutex);
3776
3777 return ret;
3778 }
3779
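/*
 * mac80211 .post_channel_switch callback: bail out if the switch failed;
 * otherwise (for a client) re-enable TX to the AP station, update the MAC
 * context, re-enable beacon filtering and stop the session protection,
 * then restore power save.
 */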
3780 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3781 struct ieee80211_vif *vif)
3782 {
3783 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3784 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3785 int ret;
3786
3787 mutex_lock(&mvm->mutex);
3788
3789 if (mvmvif->csa_failed) {
3790 mvmvif->csa_failed = false;
3791 ret = -EIO;
3792 goto out_unlock;
3793 }
3794
3795 if (vif->type == NL80211_IFTYPE_STATION) {
3796 struct iwl_mvm_sta *mvmsta;
3797
3798 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3799 mvmvif->ap_sta_id);
3800
3801 if (WARN_ON(!mvmsta)) {
3802 ret = -EIO;
3803 goto out_unlock;
3804 }
3805
3806 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3807
3808 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3809
3810 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3811 if (ret)
3812 goto out_unlock;
3813
3814 iwl_mvm_stop_session_protection(mvm, vif);
3815 }
3816
3817 mvmvif->ps_disabled = false;
3818
3819 ret = iwl_mvm_power_update_ps(mvm);
3820
3821 out_unlock:
3822 mutex_unlock(&mvm->mutex);
3823
3824 return ret;
3825 }
3826
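/*
 * mac80211 .flush callback, station vifs only: collect the TFD queue
 * masks of the AP station and all TDLS peers of the vif, then either
 * flush them in the firmware (drop) or wait for the transport queues to
 * drain - the latter without holding mvm->mutex, since it may take a while.
 */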
3827 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3828 struct ieee80211_vif *vif, u32 queues, bool drop)
3829 {
3830 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3831 struct iwl_mvm_vif *mvmvif;
3832 struct iwl_mvm_sta *mvmsta;
3833 struct ieee80211_sta *sta;
3834 int i;
3835 u32 msk = 0;
3836
3837 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3838 return;
3839
3840 /* Make sure we're done with the deferred traffic before flushing */
3841 if (iwl_mvm_is_dqa_supported(mvm))
3842 flush_work(&mvm->add_stream_wk);
3843
3844 mutex_lock(&mvm->mutex);
3845 mvmvif = iwl_mvm_vif_from_mac80211(vif);
3846
3847 /* flush the AP-station and all TDLS peers */
3848 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3849 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3850 lockdep_is_held(&mvm->mutex));
3851 if (IS_ERR_OR_NULL(sta))
3852 continue;
3853
3854 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3855 if (mvmsta->vif != vif)
3856 continue;
3857
3858 /* make sure only TDLS peers or the AP are flushed */
3859 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3860
3861 msk |= mvmsta->tfd_queue_msk;
3862 }
3863
3864 if (drop) {
3865 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3866 IWL_ERR(mvm, "flush request failed\n");
3867 mutex_unlock(&mvm->mutex);
3868 } else {
3869 mutex_unlock(&mvm->mutex);
3870
3871 /* this can take a while, and other operations may need to make
3872 * progress in the meantime, so wait without the mutex held
3873 */
3874 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3875 }
3876 }
3877
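/*
 * mac80211 .get_survey callback: only global radio statistics (idx 0) are
 * reported, and only when the firmware has the radio/beacon statistics
 * capability. Accumulated and current on-air/RX/TX/scan times are summed
 * and converted from microseconds to milliseconds.
 */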
3878 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3879 struct survey_info *survey)
3880 {
3881 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3882 int ret;
3883
3884 memset(survey, 0, sizeof(*survey));
3885
3886 /* only support global statistics right now */
3887 if (idx != 0)
3888 return -ENOENT;
3889
3890 if (!fw_has_capa(&mvm->fw->ucode_capa,
3891 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3892 return -ENOENT;
3893
3894 mutex_lock(&mvm->mutex);
3895
3896 if (mvm->ucode_loaded) {
3897 ret = iwl_mvm_request_statistics(mvm, false);
3898 if (ret)
3899 goto out;
3900 }
3901
3902 survey->filled = SURVEY_INFO_TIME |
3903 SURVEY_INFO_TIME_RX |
3904 SURVEY_INFO_TIME_TX |
3905 SURVEY_INFO_TIME_SCAN;
3906 survey->time = mvm->accu_radio_stats.on_time_rf +
3907 mvm->radio_stats.on_time_rf;
3908 do_div(survey->time, USEC_PER_MSEC);
3909
3910 survey->time_rx = mvm->accu_radio_stats.rx_time +
3911 mvm->radio_stats.rx_time;
3912 do_div(survey->time_rx, USEC_PER_MSEC);
3913
3914 survey->time_tx = mvm->accu_radio_stats.tx_time +
3915 mvm->radio_stats.tx_time;
3916 do_div(survey->time_tx, USEC_PER_MSEC);
3917
3918 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
3919 mvm->radio_stats.on_time_scan;
3920 do_div(survey->time_scan, USEC_PER_MSEC);
3921
3922 ret = 0;
3923 out:
3924 mutex_unlock(&mvm->mutex);
3925 return ret;
3926 }
3927
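/*
 * mac80211 .sta_statistics callback: report the station's averaged signal
 * energy and, when the firmware supports beacon statistics and driver
 * beacon filtering is active on an associated vif, the number of received
 * beacons and their average signal.
 */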
3928 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
3929 struct ieee80211_vif *vif,
3930 struct ieee80211_sta *sta,
3931 struct station_info *sinfo)
3932 {
3933 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3934 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3935 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3936
3937 if (mvmsta->avg_energy) {
3938 sinfo->signal_avg = mvmsta->avg_energy;
3939 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
3940 }
3941
3942 if (!fw_has_capa(&mvm->fw->ucode_capa,
3943 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3944 return;
3945
3946 /* if beacon filtering isn't on, mac80211 does it anyway */
3947 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
3948 return;
3949
3950 if (!vif->bss_conf.assoc)
3951 return;
3952
3953 mutex_lock(&mvm->mutex);
3954
3955 if (mvmvif->ap_sta_id != mvmsta->sta_id)
3956 goto unlock;
3957
3958 if (iwl_mvm_request_statistics(mvm, false))
3959 goto unlock;
3960
3961 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
3962 mvmvif->beacon_stats.accu_num_beacons;
3963 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
3964 if (mvmvif->beacon_stats.avg_signal) {
3965 /* the firmware only reports a value after receiving a few beacons */
3966 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
3967 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
3968 }
3969 unlock:
3970 mutex_unlock(&mvm->mutex);
3971 }
3972
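/*
 * MLME event hook for the firmware debug triggers: on assoc/auth denial
 * or timeout and on deauth RX/TX, CHECK_MLME_TRIGGER decrements the
 * per-event occurrence counter and collects a firmware dump once it
 * reaches zero.
 */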
3973 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
3974 struct ieee80211_vif *vif,
3975 const struct ieee80211_event *event)
3976 {
3977 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
3978 do { \
3979 if ((_cnt) && --(_cnt)) \
3980 break; \
3981 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
3982 } while (0)
3983
3984 struct iwl_fw_dbg_trigger_tlv *trig;
3985 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
3986
3987 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
3988 return;
3989
3990 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
3991 trig_mlme = (void *)trig->data;
3992 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
3993 return;
3994
3995 if (event->u.mlme.data == ASSOC_EVENT) {
3996 if (event->u.mlme.status == MLME_DENIED)
3997 CHECK_MLME_TRIGGER(mvm, trig, buf,
3998 trig_mlme->stop_assoc_denied,
3999 "DENIED ASSOC: reason %d",
4000 event->u.mlme.reason);
4001 else if (event->u.mlme.status == MLME_TIMEOUT)
4002 CHECK_MLME_TRIGGER(mvm, trig, buf,
4003 trig_mlme->stop_assoc_timeout,
4004 "ASSOC TIMEOUT");
4005 } else if (event->u.mlme.data == AUTH_EVENT) {
4006 if (event->u.mlme.status == MLME_DENIED)
4007 CHECK_MLME_TRIGGER(mvm, trig, buf,
4008 trig_mlme->stop_auth_denied,
4009 "DENIED AUTH: reason %d",
4010 event->u.mlme.reason);
4011 else if (event->u.mlme.status == MLME_TIMEOUT)
4012 CHECK_MLME_TRIGGER(mvm, trig, buf,
4013 trig_mlme->stop_auth_timeout,
4014 "AUTH TIMEOUT");
4015 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4016 CHECK_MLME_TRIGGER(mvm, trig, buf,
4017 trig_mlme->stop_rx_deauth,
4018 "DEAUTH RX %d", event->u.mlme.reason);
4019 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4020 CHECK_MLME_TRIGGER(mvm, trig, buf,
4021 trig_mlme->stop_tx_deauth,
4022 "DEAUTH TX %d", event->u.mlme.reason);
4023 }
4024 #undef CHECK_MLME_TRIGGER
4025 }
4026
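/*
 * Collect a firmware dump when a BAR is received on a TID that is
 * enabled in the BA debug trigger.
 */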
4027 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4028 struct ieee80211_vif *vif,
4029 const struct ieee80211_event *event)
4030 {
4031 struct iwl_fw_dbg_trigger_tlv *trig;
4032 struct iwl_fw_dbg_trigger_ba *ba_trig;
4033
4034 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4035 return;
4036
4037 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4038 ba_trig = (void *)trig->data;
4039 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4040 return;
4041
4042 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4043 return;
4044
4045 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4046 "BAR received from %pM, tid %d, ssn %d",
4047 event->u.ba.sta->addr, event->u.ba.tid,
4048 event->u.ba.ssn);
4049 }
4050
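/*
 * Same as above, but for frames that timed out on a TID enabled in the
 * BA debug trigger.
 */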
4051 static void
4052 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4053 struct ieee80211_vif *vif,
4054 const struct ieee80211_event *event)
4055 {
4056 struct iwl_fw_dbg_trigger_tlv *trig;
4057 struct iwl_fw_dbg_trigger_ba *ba_trig;
4058
4059 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4060 return;
4061
4062 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4063 ba_trig = (void *)trig->data;
4064 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4065 return;
4066
4067 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4068 return;
4069
4070 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4071 "Frame from %pM timed out, tid %d",
4072 event->u.ba.sta->addr, event->u.ba.tid);
4073 }
4074
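/* Dispatch mac80211 events to the firmware debug trigger handlers above */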
4075 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4076 struct ieee80211_vif *vif,
4077 const struct ieee80211_event *event)
4078 {
4079 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4080
4081 switch (event->type) {
4082 case MLME_EVENT:
4083 iwl_mvm_event_mlme_callback(mvm, vif, event);
4084 break;
4085 case BAR_RX_EVENT:
4086 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4087 break;
4088 case BA_FRAME_TIMEOUT:
4089 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4090 break;
4091 default:
4092 break;
4093 }
4094 }
4095
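/*
 * Send an internal notification to all RX queues. When @notif->sync is
 * set, queue_sync_counter is armed with the number of RX queues and we
 * wait up to one second for it to drop to zero before bumping the cookie
 * for the next round.
 */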
4096 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4097 struct iwl_mvm_internal_rxq_notif *notif,
4098 u32 size)
4099 {
4100 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
4101 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4102 int ret;
4103
4104 lockdep_assert_held(&mvm->mutex);
4105
4106 if (!iwl_mvm_has_new_rx_api(mvm))
4107 return;
4108
4109 notif->cookie = mvm->queue_sync_cookie;
4110
4111 if (notif->sync)
4112 atomic_set(&mvm->queue_sync_counter,
4113 mvm->trans->num_rx_queues);
4114
4115 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4116 if (ret) {
4117 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4118 goto out;
4119 }
4120
4121 if (notif->sync) {
4122 ret = wait_event_timeout(notif_waitq,
4123 atomic_read(&mvm->queue_sync_counter) == 0, HZ);
4124 WARN_ON_ONCE(!ret);
4125 }
4126
4127 out:
4128 atomic_set(&mvm->queue_sync_counter, 0);
4129 mvm->queue_sync_cookie++;
4130 }
4131
4132 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4133 {
4134 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4135 struct iwl_mvm_internal_rxq_notif data = {
4136 .type = IWL_MVM_RXQ_EMPTY,
4137 .sync = 1,
4138 };
4139
4140 mutex_lock(&mvm->mutex);
4141 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4142 mutex_unlock(&mvm->mutex);
4143 }
4144
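/*
 * The mac80211 ops table: wires the callbacks implemented in this file
 * (and in d3.c for the PM ops) into mac80211.
 */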
4145 const struct ieee80211_ops iwl_mvm_hw_ops = {
4146 .tx = iwl_mvm_mac_tx,
4147 .ampdu_action = iwl_mvm_mac_ampdu_action,
4148 .start = iwl_mvm_mac_start,
4149 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4150 .stop = iwl_mvm_mac_stop,
4151 .add_interface = iwl_mvm_mac_add_interface,
4152 .remove_interface = iwl_mvm_mac_remove_interface,
4153 .config = iwl_mvm_mac_config,
4154 .prepare_multicast = iwl_mvm_prepare_multicast,
4155 .configure_filter = iwl_mvm_configure_filter,
4156 .config_iface_filter = iwl_mvm_config_iface_filter,
4157 .bss_info_changed = iwl_mvm_bss_info_changed,
4158 .hw_scan = iwl_mvm_mac_hw_scan,
4159 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4160 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4161 .sta_state = iwl_mvm_mac_sta_state,
4162 .sta_notify = iwl_mvm_mac_sta_notify,
4163 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4164 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4165 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4166 .sta_rc_update = iwl_mvm_sta_rc_update,
4167 .conf_tx = iwl_mvm_mac_conf_tx,
4168 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4169 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4170 .flush = iwl_mvm_mac_flush,
4171 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4172 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4173 .set_key = iwl_mvm_mac_set_key,
4174 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4175 .remain_on_channel = iwl_mvm_roc,
4176 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4177 .add_chanctx = iwl_mvm_add_chanctx,
4178 .remove_chanctx = iwl_mvm_remove_chanctx,
4179 .change_chanctx = iwl_mvm_change_chanctx,
4180 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4181 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4182 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4183
4184 .start_ap = iwl_mvm_start_ap_ibss,
4185 .stop_ap = iwl_mvm_stop_ap_ibss,
4186 .join_ibss = iwl_mvm_start_ap_ibss,
4187 .leave_ibss = iwl_mvm_stop_ap_ibss,
4188
4189 .set_tim = iwl_mvm_set_tim,
4190
4191 .channel_switch = iwl_mvm_channel_switch,
4192 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4193 .post_channel_switch = iwl_mvm_post_channel_switch,
4194
4195 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4196 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4197 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4198
4199 .event_callback = iwl_mvm_mac_event_callback,
4200
4201 .sync_rx_queues = iwl_mvm_sync_rx_queues,
4202
4203 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4204
4205 #ifdef CONFIG_PM_SLEEP
4206 /* see d3.c for the implementations */
4207 .suspend = iwl_mvm_suspend,
4208 .resume = iwl_mvm_resume,
4209 .set_wakeup = iwl_mvm_set_wakeup,
4210 .set_rekey_data = iwl_mvm_set_rekey_data,
4211 #if IS_ENABLED(CONFIG_IPV6)
4212 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4213 #endif
4214 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4215 #endif
4216 .get_survey = iwl_mvm_mac_get_survey,
4217 .sta_statistics = iwl_mvm_mac_sta_statistics,
4218 };