1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "mac.h"
19
20 #include <net/mac80211.h>
21 #include <linux/etherdevice.h>
22
23 #include "hif.h"
24 #include "core.h"
25 #include "debug.h"
26 #include "wmi.h"
27 #include "htt.h"
28 #include "txrx.h"
29 #include "testmode.h"
31 #include "wmi-tlv.h"
32 #include "wmi-ops.h"
33 #include "wow.h"
34
35 /*********/
36 /* Rates */
37 /*********/
38
39 static struct ieee80211_rate ath10k_rates[] = {
40 { .bitrate = 10,
41 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
42 { .bitrate = 20,
43 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
44 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
45 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 { .bitrate = 55,
47 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
48 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
49 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
50 { .bitrate = 110,
51 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
52 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
53 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
54
55 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
56 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
57 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
58 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
59 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
60 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
61 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
62 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
63 };
64
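/* Index 4 is the first OFDM entry in the table above: the 5 GHz (11a) band
 * advertises only the OFDM rates, while 2 GHz (11g) advertises the whole
 * table including the CCK rates.
 */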
65 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
66
67 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
68 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
69 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
70 #define ath10k_g_rates (ath10k_rates + 0)
71 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
72
73 static bool ath10k_mac_bitrate_is_cck(int bitrate)
74 {
75 switch (bitrate) {
76 case 10:
77 case 20:
78 case 55:
79 case 110:
80 return true;
81 }
82
83 return false;
84 }
85
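/* Convert a bitrate (in 100 kbps units, as used by mac80211) into the rate
 * code passed to firmware: the rate in 500 kbps units, with BIT(7)
 * apparently flagging CCK rates so they are not confused with OFDM codes.
 */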
86 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
87 {
88 return DIV_ROUND_UP(bitrate, 5) |
89 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
90 }
91
92 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
93 u8 hw_rate, bool cck)
94 {
95 const struct ieee80211_rate *rate;
96 int i;
97
98 for (i = 0; i < sband->n_bitrates; i++) {
99 rate = &sband->bitrates[i];
100
101 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
102 continue;
103
104 if (rate->hw_value == hw_rate)
105 return i;
106 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
107 rate->hw_value_short == hw_rate)
108 return i;
109 }
110
111 return 0;
112 }
113
114 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
115 u32 bitrate)
116 {
117 int i;
118
119 for (i = 0; i < sband->n_bitrates; i++)
120 if (sband->bitrates[i].bitrate == bitrate)
121 return i;
122
123 return 0;
124 }
125
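/* The VHT MCS map carries two bits per spatial stream; turn the 2-bit
 * "highest supported MCS" value for the given nss into a bitmask of the
 * usable MCS indices (0-7, 0-8 or 0-9).
 */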
126 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
127 {
128 switch ((mcs_map >> (2 * nss)) & 0x3) {
129 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
130 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
131 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
132 }
133 return 0;
134 }
135
136 static u32
137 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
138 {
139 int nss;
140
141 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
142 if (ht_mcs_mask[nss])
143 return nss + 1;
144
145 return 1;
146 }
147
148 static u32
149 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
150 {
151 int nss;
152
153 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
154 if (vht_mcs_mask[nss])
155 return nss + 1;
156
157 return 1;
158 }
159
160 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
161 {
162 enum wmi_host_platform_type platform_type;
163 int ret;
164
165 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
166 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
167 else
168 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
169
170 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
171
172 if (ret && ret != -EOPNOTSUPP) {
173 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
174 return ret;
175 }
176
177 return 0;
178 }
179
180 /**********/
181 /* Crypto */
182 /**********/
183
184 static int ath10k_send_key(struct ath10k_vif *arvif,
185 struct ieee80211_key_conf *key,
186 enum set_key_cmd cmd,
187 const u8 *macaddr, u32 flags)
188 {
189 struct ath10k *ar = arvif->ar;
190 struct wmi_vdev_install_key_arg arg = {
191 .vdev_id = arvif->vdev_id,
192 .key_idx = key->keyidx,
193 .key_len = key->keylen,
194 .key_data = key->key,
195 .key_flags = flags,
196 .macaddr = macaddr,
197 };
198
199 lockdep_assert_held(&arvif->ar->conf_mutex);
200
201 switch (key->cipher) {
202 case WLAN_CIPHER_SUITE_CCMP:
203 arg.key_cipher = WMI_CIPHER_AES_CCM;
204 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
205 break;
206 case WLAN_CIPHER_SUITE_TKIP:
207 arg.key_cipher = WMI_CIPHER_TKIP;
208 arg.key_txmic_len = 8;
209 arg.key_rxmic_len = 8;
210 break;
211 case WLAN_CIPHER_SUITE_WEP40:
212 case WLAN_CIPHER_SUITE_WEP104:
213 arg.key_cipher = WMI_CIPHER_WEP;
214 break;
215 case WLAN_CIPHER_SUITE_AES_CMAC:
216 WARN_ON(1);
217 return -EINVAL;
218 default:
219 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
220 return -EOPNOTSUPP;
221 }
222
223 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
224 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
225
226 if (cmd == DISABLE_KEY) {
227 arg.key_cipher = WMI_CIPHER_NONE;
228 arg.key_data = NULL;
229 }
230
231 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
232 }
233
234 static int ath10k_install_key(struct ath10k_vif *arvif,
235 struct ieee80211_key_conf *key,
236 enum set_key_cmd cmd,
237 const u8 *macaddr, u32 flags)
238 {
239 struct ath10k *ar = arvif->ar;
240 int ret;
241 unsigned long time_left;
242
243 lockdep_assert_held(&ar->conf_mutex);
244
245 reinit_completion(&ar->install_key_done);
246
247 if (arvif->nohwcrypt)
248 return 1;
249
250 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
251 if (ret)
252 return ret;
253
254 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
255 if (time_left == 0)
256 return -ETIMEDOUT;
257
258 return 0;
259 }
260
261 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
262 const u8 *addr)
263 {
264 struct ath10k *ar = arvif->ar;
265 struct ath10k_peer *peer;
266 int ret;
267 int i;
268 u32 flags;
269
270 lockdep_assert_held(&ar->conf_mutex);
271
272 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
273 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
274 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
275 return -EINVAL;
276
277 spin_lock_bh(&ar->data_lock);
278 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
279 spin_unlock_bh(&ar->data_lock);
280
281 if (!peer)
282 return -ENOENT;
283
284 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
285 if (arvif->wep_keys[i] == NULL)
286 continue;
287
288 switch (arvif->vif->type) {
289 case NL80211_IFTYPE_AP:
290 flags = WMI_KEY_PAIRWISE;
291
292 if (arvif->def_wep_key_idx == i)
293 flags |= WMI_KEY_TX_USAGE;
294
295 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
296 SET_KEY, addr, flags);
297 if (ret < 0)
298 return ret;
299 break;
300 case NL80211_IFTYPE_ADHOC:
301 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
302 SET_KEY, addr,
303 WMI_KEY_PAIRWISE);
304 if (ret < 0)
305 return ret;
306
307 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
308 SET_KEY, addr, WMI_KEY_GROUP);
309 if (ret < 0)
310 return ret;
311 break;
312 default:
313 WARN_ON(1);
314 return -EINVAL;
315 }
316
317 spin_lock_bh(&ar->data_lock);
318 peer->keys[i] = arvif->wep_keys[i];
319 spin_unlock_bh(&ar->data_lock);
320 }
321
322 /* In some cases (notably with static WEP IBSS with multiple keys)
323 * multicast Tx becomes broken. Both pairwise and groupwise keys are
324 * installed already. Using WMI_KEY_TX_USAGE in different combinations
325 	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
326 * effective so use that.
327 *
328 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
329 */
330 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
331 return 0;
332
333 if (arvif->def_wep_key_idx == -1)
334 return 0;
335
336 ret = ath10k_wmi_vdev_set_param(arvif->ar,
337 arvif->vdev_id,
338 arvif->ar->wmi.vdev_param->def_keyid,
339 arvif->def_wep_key_idx);
340 if (ret) {
341 		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
342 arvif->vdev_id, ret);
343 return ret;
344 }
345
346 return 0;
347 }
348
349 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
350 const u8 *addr)
351 {
352 struct ath10k *ar = arvif->ar;
353 struct ath10k_peer *peer;
354 int first_errno = 0;
355 int ret;
356 int i;
357 u32 flags = 0;
358
359 lockdep_assert_held(&ar->conf_mutex);
360
361 spin_lock_bh(&ar->data_lock);
362 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
363 spin_unlock_bh(&ar->data_lock);
364
365 if (!peer)
366 return -ENOENT;
367
368 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
369 if (peer->keys[i] == NULL)
370 continue;
371
372 /* key flags are not required to delete the key */
373 ret = ath10k_install_key(arvif, peer->keys[i],
374 DISABLE_KEY, addr, flags);
375 if (ret < 0 && first_errno == 0)
376 first_errno = ret;
377
378 if (ret < 0)
379 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
380 i, ret);
381
382 spin_lock_bh(&ar->data_lock);
383 peer->keys[i] = NULL;
384 spin_unlock_bh(&ar->data_lock);
385 }
386
387 return first_errno;
388 }
389
390 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
391 u8 keyidx)
392 {
393 struct ath10k_peer *peer;
394 int i;
395
396 lockdep_assert_held(&ar->data_lock);
397
398 /* We don't know which vdev this peer belongs to,
399 * since WMI doesn't give us that information.
400 *
401 * FIXME: multi-bss needs to be handled.
402 */
403 peer = ath10k_peer_find(ar, 0, addr);
404 if (!peer)
405 return false;
406
407 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
408 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
409 return true;
410 }
411
412 return false;
413 }
414
415 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
416 struct ieee80211_key_conf *key)
417 {
418 struct ath10k *ar = arvif->ar;
419 struct ath10k_peer *peer;
420 u8 addr[ETH_ALEN];
421 int first_errno = 0;
422 int ret;
423 int i;
424 u32 flags = 0;
425
426 lockdep_assert_held(&ar->conf_mutex);
427
428 for (;;) {
429 		/* since ath10k_install_key() needs to sleep we can't hold
430 		 * data_lock all the time, so we try to remove the keys incrementally */
431 spin_lock_bh(&ar->data_lock);
432 i = 0;
433 list_for_each_entry(peer, &ar->peers, list) {
434 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
435 if (peer->keys[i] == key) {
436 ether_addr_copy(addr, peer->addr);
437 peer->keys[i] = NULL;
438 break;
439 }
440 }
441
442 if (i < ARRAY_SIZE(peer->keys))
443 break;
444 }
445 spin_unlock_bh(&ar->data_lock);
446
447 if (i == ARRAY_SIZE(peer->keys))
448 break;
449 /* key flags are not required to delete the key */
450 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
451 if (ret < 0 && first_errno == 0)
452 first_errno = ret;
453
454 if (ret)
455 ath10k_warn(ar, "failed to remove key for %pM: %d\n",
456 addr, ret);
457 }
458
459 return first_errno;
460 }
461
462 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
463 struct ieee80211_key_conf *key)
464 {
465 struct ath10k *ar = arvif->ar;
466 struct ath10k_peer *peer;
467 int ret;
468
469 lockdep_assert_held(&ar->conf_mutex);
470
471 list_for_each_entry(peer, &ar->peers, list) {
472 if (ether_addr_equal(peer->addr, arvif->vif->addr))
473 continue;
474
475 if (ether_addr_equal(peer->addr, arvif->bssid))
476 continue;
477
478 if (peer->keys[key->keyidx] == key)
479 continue;
480
481 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i wep key %i needs update\n",
482 arvif->vdev_id, key->keyidx);
483
484 ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
485 if (ret) {
486 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
487 arvif->vdev_id, peer->addr, ret);
488 return ret;
489 }
490 }
491
492 return 0;
493 }
494
495 /*********************/
496 /* General utilities */
497 /*********************/
498
499 static inline enum wmi_phy_mode
500 chan_to_phymode(const struct cfg80211_chan_def *chandef)
501 {
502 enum wmi_phy_mode phymode = MODE_UNKNOWN;
503
504 switch (chandef->chan->band) {
505 case NL80211_BAND_2GHZ:
506 switch (chandef->width) {
507 case NL80211_CHAN_WIDTH_20_NOHT:
508 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
509 phymode = MODE_11B;
510 else
511 phymode = MODE_11G;
512 break;
513 case NL80211_CHAN_WIDTH_20:
514 phymode = MODE_11NG_HT20;
515 break;
516 case NL80211_CHAN_WIDTH_40:
517 phymode = MODE_11NG_HT40;
518 break;
519 case NL80211_CHAN_WIDTH_5:
520 case NL80211_CHAN_WIDTH_10:
521 case NL80211_CHAN_WIDTH_80:
522 case NL80211_CHAN_WIDTH_80P80:
523 case NL80211_CHAN_WIDTH_160:
524 phymode = MODE_UNKNOWN;
525 break;
526 }
527 break;
528 case NL80211_BAND_5GHZ:
529 switch (chandef->width) {
530 case NL80211_CHAN_WIDTH_20_NOHT:
531 phymode = MODE_11A;
532 break;
533 case NL80211_CHAN_WIDTH_20:
534 phymode = MODE_11NA_HT20;
535 break;
536 case NL80211_CHAN_WIDTH_40:
537 phymode = MODE_11NA_HT40;
538 break;
539 case NL80211_CHAN_WIDTH_80:
540 phymode = MODE_11AC_VHT80;
541 break;
542 case NL80211_CHAN_WIDTH_5:
543 case NL80211_CHAN_WIDTH_10:
544 case NL80211_CHAN_WIDTH_80P80:
545 case NL80211_CHAN_WIDTH_160:
546 phymode = MODE_UNKNOWN;
547 break;
548 }
549 break;
550 default:
551 break;
552 }
553
554 WARN_ON(phymode == MODE_UNKNOWN);
555 return phymode;
556 }
557
558 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
559 {
560 /*
561 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
562 * 0 for no restriction
563 * 1 for 1/4 us
564 * 2 for 1/2 us
565 * 3 for 1 us
566 * 4 for 2 us
567 * 5 for 4 us
568 * 6 for 8 us
569 * 7 for 16 us
570 */
571 switch (mpdudensity) {
572 case 0:
573 return 0;
574 case 1:
575 case 2:
576 case 3:
577 /* Our lower layer calculations limit our precision to
578 1 microsecond */
579 return 1;
580 case 4:
581 return 2;
582 case 5:
583 return 4;
584 case 6:
585 return 8;
586 case 7:
587 return 16;
588 default:
589 return 0;
590 }
591 }
592
593 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
594 struct cfg80211_chan_def *def)
595 {
596 struct ieee80211_chanctx_conf *conf;
597
598 rcu_read_lock();
599 conf = rcu_dereference(vif->chanctx_conf);
600 if (!conf) {
601 rcu_read_unlock();
602 return -ENOENT;
603 }
604
605 *def = conf->def;
606 rcu_read_unlock();
607
608 return 0;
609 }
610
611 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
612 struct ieee80211_chanctx_conf *conf,
613 void *data)
614 {
615 int *num = data;
616
617 (*num)++;
618 }
619
620 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
621 {
622 int num = 0;
623
624 ieee80211_iter_chan_contexts_atomic(ar->hw,
625 ath10k_mac_num_chanctxs_iter,
626 &num);
627
628 return num;
629 }
630
631 static void
632 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
633 struct ieee80211_chanctx_conf *conf,
634 void *data)
635 {
636 struct cfg80211_chan_def **def = data;
637
638 *def = &conf->def;
639 }
640
641 static int ath10k_peer_create(struct ath10k *ar,
642 struct ieee80211_vif *vif,
643 struct ieee80211_sta *sta,
644 u32 vdev_id,
645 const u8 *addr,
646 enum wmi_peer_type peer_type)
647 {
648 struct ath10k_vif *arvif;
649 struct ath10k_peer *peer;
650 int num_peers = 0;
651 int ret;
652
653 lockdep_assert_held(&ar->conf_mutex);
654
655 num_peers = ar->num_peers;
656
657 /* Each vdev consumes a peer entry as well */
658 list_for_each_entry(arvif, &ar->arvifs, list)
659 num_peers++;
660
661 if (num_peers >= ar->max_num_peers)
662 return -ENOBUFS;
663
664 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
665 if (ret) {
666 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
667 addr, vdev_id, ret);
668 return ret;
669 }
670
671 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
672 if (ret) {
673 		ath10k_warn(ar, "failed to wait for creation of wmi peer %pM on vdev %i: %i\n",
674 addr, vdev_id, ret);
675 return ret;
676 }
677
678 spin_lock_bh(&ar->data_lock);
679
680 peer = ath10k_peer_find(ar, vdev_id, addr);
681 if (!peer) {
682 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
683 addr, vdev_id);
684 ath10k_wmi_peer_delete(ar, vdev_id, addr);
685 spin_unlock_bh(&ar->data_lock);
686 return -ENOENT;
687 }
688
689 peer->vif = vif;
690 peer->sta = sta;
691
692 spin_unlock_bh(&ar->data_lock);
693
694 ar->num_peers++;
695
696 return 0;
697 }
698
699 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
700 {
701 struct ath10k *ar = arvif->ar;
702 u32 param;
703 int ret;
704
705 param = ar->wmi.pdev_param->sta_kickout_th;
706 ret = ath10k_wmi_pdev_set_param(ar, param,
707 ATH10K_KICKOUT_THRESHOLD);
708 if (ret) {
709 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
710 arvif->vdev_id, ret);
711 return ret;
712 }
713
714 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
715 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
716 ATH10K_KEEPALIVE_MIN_IDLE);
717 if (ret) {
718 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
719 arvif->vdev_id, ret);
720 return ret;
721 }
722
723 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
724 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
725 ATH10K_KEEPALIVE_MAX_IDLE);
726 if (ret) {
727 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
728 arvif->vdev_id, ret);
729 return ret;
730 }
731
732 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
733 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
734 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
735 if (ret) {
736 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
737 arvif->vdev_id, ret);
738 return ret;
739 }
740
741 return 0;
742 }
743
744 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
745 {
746 struct ath10k *ar = arvif->ar;
747 u32 vdev_param;
748
749 vdev_param = ar->wmi.vdev_param->rts_threshold;
750 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
751 }
752
753 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
754 {
755 int ret;
756
757 lockdep_assert_held(&ar->conf_mutex);
758
759 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
760 if (ret)
761 return ret;
762
763 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
764 if (ret)
765 return ret;
766
767 ar->num_peers--;
768
769 return 0;
770 }
771
772 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
773 {
774 struct ath10k_peer *peer, *tmp;
775 int peer_id;
776
777 lockdep_assert_held(&ar->conf_mutex);
778
779 spin_lock_bh(&ar->data_lock);
780 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
781 if (peer->vdev_id != vdev_id)
782 continue;
783
784 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
785 peer->addr, vdev_id);
786
787 for_each_set_bit(peer_id, peer->peer_ids,
788 ATH10K_MAX_NUM_PEER_IDS) {
789 ar->peer_map[peer_id] = NULL;
790 }
791
792 list_del(&peer->list);
793 kfree(peer);
794 ar->num_peers--;
795 }
796 spin_unlock_bh(&ar->data_lock);
797 }
798
799 static void ath10k_peer_cleanup_all(struct ath10k *ar)
800 {
801 struct ath10k_peer *peer, *tmp;
802
803 lockdep_assert_held(&ar->conf_mutex);
804
805 spin_lock_bh(&ar->data_lock);
806 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
807 list_del(&peer->list);
808 kfree(peer);
809 }
810 spin_unlock_bh(&ar->data_lock);
811
812 ar->num_peers = 0;
813 ar->num_stations = 0;
814 }
815
816 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
817 struct ieee80211_sta *sta,
818 enum wmi_tdls_peer_state state)
819 {
820 int ret;
821 struct wmi_tdls_peer_update_cmd_arg arg = {};
822 struct wmi_tdls_peer_capab_arg cap = {};
823 struct wmi_channel_arg chan_arg = {};
824
825 lockdep_assert_held(&ar->conf_mutex);
826
827 arg.vdev_id = vdev_id;
828 arg.peer_state = state;
829 ether_addr_copy(arg.addr, sta->addr);
830
831 cap.peer_max_sp = sta->max_sp;
832 cap.peer_uapsd_queues = sta->uapsd_queues;
833
834 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
835 !sta->tdls_initiator)
836 cap.is_peer_responder = 1;
837
838 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
839 if (ret) {
840 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
841 arg.addr, vdev_id, ret);
842 return ret;
843 }
844
845 return 0;
846 }
847
848 /************************/
849 /* Interface management */
850 /************************/
851
852 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
853 {
854 struct ath10k *ar = arvif->ar;
855
856 lockdep_assert_held(&ar->data_lock);
857
858 if (!arvif->beacon)
859 return;
860
861 if (!arvif->beacon_buf)
862 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
863 arvif->beacon->len, DMA_TO_DEVICE);
864
865 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
866 arvif->beacon_state != ATH10K_BEACON_SENT))
867 return;
868
869 dev_kfree_skb_any(arvif->beacon);
870
871 arvif->beacon = NULL;
872 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
873 }
874
875 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
876 {
877 struct ath10k *ar = arvif->ar;
878
879 lockdep_assert_held(&ar->data_lock);
880
881 ath10k_mac_vif_beacon_free(arvif);
882
883 if (arvif->beacon_buf) {
884 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
885 arvif->beacon_buf, arvif->beacon_paddr);
886 arvif->beacon_buf = NULL;
887 }
888 }
889
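/* Wait for the firmware to acknowledge a vdev start/restart/stop request;
 * the corresponding WMI event handler is expected to complete
 * ar->vdev_setup_done.
 */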
890 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
891 {
892 unsigned long time_left;
893
894 lockdep_assert_held(&ar->conf_mutex);
895
896 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
897 return -ESHUTDOWN;
898
899 time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
900 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
901 if (time_left == 0)
902 return -ETIMEDOUT;
903
904 return 0;
905 }
906
907 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
908 {
909 struct cfg80211_chan_def *chandef = NULL;
910 struct ieee80211_channel *channel = NULL;
911 struct wmi_vdev_start_request_arg arg = {};
912 int ret = 0;
913
914 lockdep_assert_held(&ar->conf_mutex);
915
916 ieee80211_iter_chan_contexts_atomic(ar->hw,
917 ath10k_mac_get_any_chandef_iter,
918 &chandef);
919 if (WARN_ON_ONCE(!chandef))
920 return -ENOENT;
921
922 channel = chandef->chan;
923
924 arg.vdev_id = vdev_id;
925 arg.channel.freq = channel->center_freq;
926 arg.channel.band_center_freq1 = chandef->center_freq1;
927
928 	/* TODO: set this up dynamically; what should happen if we
929 	 * don't have any vifs? */
930 arg.channel.mode = chan_to_phymode(chandef);
931 arg.channel.chan_radar =
932 !!(channel->flags & IEEE80211_CHAN_RADAR);
933
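	/* Power values are doubled here, presumably because the firmware
	 * expects them in 0.5 dBm units.
	 */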
934 arg.channel.min_power = 0;
935 arg.channel.max_power = channel->max_power * 2;
936 arg.channel.max_reg_power = channel->max_reg_power * 2;
937 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
938
939 reinit_completion(&ar->vdev_setup_done);
940
941 ret = ath10k_wmi_vdev_start(ar, &arg);
942 if (ret) {
943 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
944 vdev_id, ret);
945 return ret;
946 }
947
948 ret = ath10k_vdev_setup_sync(ar);
949 if (ret) {
950 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
951 vdev_id, ret);
952 return ret;
953 }
954
955 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
956 if (ret) {
957 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
958 vdev_id, ret);
959 goto vdev_stop;
960 }
961
962 ar->monitor_vdev_id = vdev_id;
963
964 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
965 ar->monitor_vdev_id);
966 return 0;
967
968 vdev_stop:
969 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
970 if (ret)
971 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
972 ar->monitor_vdev_id, ret);
973
974 return ret;
975 }
976
977 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
978 {
979 int ret = 0;
980
981 lockdep_assert_held(&ar->conf_mutex);
982
983 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
984 if (ret)
985 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
986 ar->monitor_vdev_id, ret);
987
988 reinit_completion(&ar->vdev_setup_done);
989
990 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
991 if (ret)
992 		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
993 ar->monitor_vdev_id, ret);
994
995 ret = ath10k_vdev_setup_sync(ar);
996 if (ret)
997 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
998 ar->monitor_vdev_id, ret);
999
1000 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1001 ar->monitor_vdev_id);
1002 return ret;
1003 }
1004
1005 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1006 {
1007 int bit, ret = 0;
1008
1009 lockdep_assert_held(&ar->conf_mutex);
1010
1011 if (ar->free_vdev_map == 0) {
1012 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1013 return -ENOMEM;
1014 }
1015
1016 bit = __ffs64(ar->free_vdev_map);
1017
1018 ar->monitor_vdev_id = bit;
1019
1020 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1021 WMI_VDEV_TYPE_MONITOR,
1022 0, ar->mac_addr);
1023 if (ret) {
1024 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1025 ar->monitor_vdev_id, ret);
1026 return ret;
1027 }
1028
1029 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1030 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1031 ar->monitor_vdev_id);
1032
1033 return 0;
1034 }
1035
1036 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1037 {
1038 int ret = 0;
1039
1040 lockdep_assert_held(&ar->conf_mutex);
1041
1042 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1043 if (ret) {
1044 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1045 ar->monitor_vdev_id, ret);
1046 return ret;
1047 }
1048
1049 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1050
1051 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1052 ar->monitor_vdev_id);
1053 return ret;
1054 }
1055
1056 static int ath10k_monitor_start(struct ath10k *ar)
1057 {
1058 int ret;
1059
1060 lockdep_assert_held(&ar->conf_mutex);
1061
1062 ret = ath10k_monitor_vdev_create(ar);
1063 if (ret) {
1064 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1065 return ret;
1066 }
1067
1068 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1069 if (ret) {
1070 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1071 ath10k_monitor_vdev_delete(ar);
1072 return ret;
1073 }
1074
1075 ar->monitor_started = true;
1076 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1077
1078 return 0;
1079 }
1080
1081 static int ath10k_monitor_stop(struct ath10k *ar)
1082 {
1083 int ret;
1084
1085 lockdep_assert_held(&ar->conf_mutex);
1086
1087 ret = ath10k_monitor_vdev_stop(ar);
1088 if (ret) {
1089 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1090 return ret;
1091 }
1092
1093 ret = ath10k_monitor_vdev_delete(ar);
1094 if (ret) {
1095 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1096 return ret;
1097 }
1098
1099 ar->monitor_started = false;
1100 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1101
1102 return 0;
1103 }
1104
1105 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1106 {
1107 int num_ctx;
1108
1109 /* At least one chanctx is required to derive a channel to start
1110 * monitor vdev on.
1111 */
1112 num_ctx = ath10k_mac_num_chanctxs(ar);
1113 if (num_ctx == 0)
1114 return false;
1115
1116 /* If there's already an existing special monitor interface then don't
1117 * bother creating another monitor vdev.
1118 */
1119 if (ar->monitor_arvif)
1120 return false;
1121
1122 return ar->monitor ||
1123 ar->filter_flags & FIF_OTHER_BSS ||
1124 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1125 }
1126
1127 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1128 {
1129 int num_ctx;
1130
1131 num_ctx = ath10k_mac_num_chanctxs(ar);
1132
1133 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1134 * shouldn't allow this but make sure to prevent handling the following
1135 * case anyway since multi-channel DFS hasn't been tested at all.
1136 */
1137 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1138 return false;
1139
1140 return true;
1141 }
1142
1143 static int ath10k_monitor_recalc(struct ath10k *ar)
1144 {
1145 bool needed;
1146 bool allowed;
1147 int ret;
1148
1149 lockdep_assert_held(&ar->conf_mutex);
1150
1151 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1152 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1153
1154 ath10k_dbg(ar, ATH10K_DBG_MAC,
1155 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1156 ar->monitor_started, needed, allowed);
1157
1158 if (WARN_ON(needed && !allowed)) {
1159 if (ar->monitor_started) {
1160 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1161
1162 ret = ath10k_monitor_stop(ar);
1163 if (ret)
1164 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1165 ret);
1166 /* not serious */
1167 }
1168
1169 return -EPERM;
1170 }
1171
1172 if (needed == ar->monitor_started)
1173 return 0;
1174
1175 if (needed)
1176 return ath10k_monitor_start(ar);
1177 else
1178 return ath10k_monitor_stop(ar);
1179 }
1180
1181 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1182 {
1183 struct ath10k *ar = arvif->ar;
1184 u32 vdev_param, rts_cts = 0;
1185
1186 lockdep_assert_held(&ar->conf_mutex);
1187
1188 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1189
1190 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1191
1192 if (arvif->num_legacy_stations > 0)
1193 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1194 WMI_RTSCTS_PROFILE);
1195 else
1196 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1197 WMI_RTSCTS_PROFILE);
1198
1199 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1200 rts_cts);
1201 }
1202
1203 static int ath10k_start_cac(struct ath10k *ar)
1204 {
1205 int ret;
1206
1207 lockdep_assert_held(&ar->conf_mutex);
1208
1209 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1210
1211 ret = ath10k_monitor_recalc(ar);
1212 if (ret) {
1213 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1214 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1215 return ret;
1216 }
1217
1218 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1219 ar->monitor_vdev_id);
1220
1221 return 0;
1222 }
1223
1224 static int ath10k_stop_cac(struct ath10k *ar)
1225 {
1226 lockdep_assert_held(&ar->conf_mutex);
1227
1228 /* CAC is not running - do nothing */
1229 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1230 return 0;
1231
1232 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1233 ath10k_monitor_stop(ar);
1234
1235 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1236
1237 return 0;
1238 }
1239
1240 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1241 struct ieee80211_chanctx_conf *conf,
1242 void *data)
1243 {
1244 bool *ret = data;
1245
1246 if (!*ret && conf->radar_enabled)
1247 *ret = true;
1248 }
1249
1250 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1251 {
1252 bool has_radar = false;
1253
1254 ieee80211_iter_chan_contexts_atomic(ar->hw,
1255 ath10k_mac_has_radar_iter,
1256 &has_radar);
1257
1258 return has_radar;
1259 }
1260
1261 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1262 {
1263 int ret;
1264
1265 lockdep_assert_held(&ar->conf_mutex);
1266
1267 ath10k_stop_cac(ar);
1268
1269 if (!ath10k_mac_has_radar_enabled(ar))
1270 return;
1271
1272 if (ar->num_started_vdevs > 0)
1273 return;
1274
1275 ret = ath10k_start_cac(ar);
1276 if (ret) {
1277 /*
1278 * Not possible to start CAC on current channel so starting
1279 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1280 * by indicating that radar was detected.
1281 */
1282 ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1283 ieee80211_radar_detected(ar->hw);
1284 }
1285 }
1286
1287 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1288 {
1289 struct ath10k *ar = arvif->ar;
1290 int ret;
1291
1292 lockdep_assert_held(&ar->conf_mutex);
1293
1294 reinit_completion(&ar->vdev_setup_done);
1295
1296 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1297 if (ret) {
1298 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1299 arvif->vdev_id, ret);
1300 return ret;
1301 }
1302
1303 ret = ath10k_vdev_setup_sync(ar);
1304 if (ret) {
1305 		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1306 arvif->vdev_id, ret);
1307 return ret;
1308 }
1309
1310 WARN_ON(ar->num_started_vdevs == 0);
1311
1312 if (ar->num_started_vdevs != 0) {
1313 ar->num_started_vdevs--;
1314 ath10k_recalc_radar_detection(ar);
1315 }
1316
1317 return ret;
1318 }
1319
1320 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1321 const struct cfg80211_chan_def *chandef,
1322 bool restart)
1323 {
1324 struct ath10k *ar = arvif->ar;
1325 struct wmi_vdev_start_request_arg arg = {};
1326 int ret = 0;
1327
1328 lockdep_assert_held(&ar->conf_mutex);
1329
1330 reinit_completion(&ar->vdev_setup_done);
1331
1332 arg.vdev_id = arvif->vdev_id;
1333 arg.dtim_period = arvif->dtim_period;
1334 arg.bcn_intval = arvif->beacon_interval;
1335
1336 arg.channel.freq = chandef->chan->center_freq;
1337 arg.channel.band_center_freq1 = chandef->center_freq1;
1338 arg.channel.mode = chan_to_phymode(chandef);
1339
1340 arg.channel.min_power = 0;
1341 arg.channel.max_power = chandef->chan->max_power * 2;
1342 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1343 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1344
1345 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1346 arg.ssid = arvif->u.ap.ssid;
1347 arg.ssid_len = arvif->u.ap.ssid_len;
1348 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1349
1350 /* For now allow DFS for AP mode */
1351 arg.channel.chan_radar =
1352 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1353 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1354 arg.ssid = arvif->vif->bss_conf.ssid;
1355 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1356 }
1357
1358 ath10k_dbg(ar, ATH10K_DBG_MAC,
1359 "mac vdev %d start center_freq %d phymode %s\n",
1360 arg.vdev_id, arg.channel.freq,
1361 ath10k_wmi_phymode_str(arg.channel.mode));
1362
1363 if (restart)
1364 ret = ath10k_wmi_vdev_restart(ar, &arg);
1365 else
1366 ret = ath10k_wmi_vdev_start(ar, &arg);
1367
1368 if (ret) {
1369 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1370 arg.vdev_id, ret);
1371 return ret;
1372 }
1373
1374 ret = ath10k_vdev_setup_sync(ar);
1375 if (ret) {
1376 ath10k_warn(ar,
1377 "failed to synchronize setup for vdev %i restart %d: %d\n",
1378 arg.vdev_id, restart, ret);
1379 return ret;
1380 }
1381
1382 ar->num_started_vdevs++;
1383 ath10k_recalc_radar_detection(ar);
1384
1385 return ret;
1386 }
1387
1388 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1389 const struct cfg80211_chan_def *def)
1390 {
1391 return ath10k_vdev_start_restart(arvif, def, false);
1392 }
1393
1394 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1395 const struct cfg80211_chan_def *def)
1396 {
1397 return ath10k_vdev_start_restart(arvif, def, true);
1398 }
1399
1400 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1401 struct sk_buff *bcn)
1402 {
1403 struct ath10k *ar = arvif->ar;
1404 struct ieee80211_mgmt *mgmt;
1405 const u8 *p2p_ie;
1406 int ret;
1407
1408 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1409 return 0;
1410
1411 mgmt = (void *)bcn->data;
1412 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1413 mgmt->u.beacon.variable,
1414 bcn->len - (mgmt->u.beacon.variable -
1415 bcn->data));
1416 if (!p2p_ie)
1417 return -ENOENT;
1418
1419 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1420 if (ret) {
1421 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1422 arvif->vdev_id, ret);
1423 return ret;
1424 }
1425
1426 return 0;
1427 }
1428
1429 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1430 u8 oui_type, size_t ie_offset)
1431 {
1432 size_t len;
1433 const u8 *next;
1434 const u8 *end;
1435 u8 *ie;
1436
1437 if (WARN_ON(skb->len < ie_offset))
1438 return -EINVAL;
1439
1440 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1441 skb->data + ie_offset,
1442 skb->len - ie_offset);
1443 if (!ie)
1444 return -ENOENT;
1445
1446 len = ie[1] + 2;
1447 end = skb->data + skb->len;
1448 next = ie + len;
1449
1450 if (WARN_ON(next > end))
1451 return -EINVAL;
1452
1453 memmove(ie, next, end - next);
1454 skb_trim(skb, skb->len - len);
1455
1456 return 0;
1457 }
1458
1459 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1460 {
1461 struct ath10k *ar = arvif->ar;
1462 struct ieee80211_hw *hw = ar->hw;
1463 struct ieee80211_vif *vif = arvif->vif;
1464 struct ieee80211_mutable_offsets offs = {};
1465 struct sk_buff *bcn;
1466 int ret;
1467
1468 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1469 return 0;
1470
1471 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1472 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1473 return 0;
1474
1475 bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1476 if (!bcn) {
1477 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1478 return -EPERM;
1479 }
1480
1481 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1482 if (ret) {
1483 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1484 kfree_skb(bcn);
1485 return ret;
1486 }
1487
1488 /* P2P IE is inserted by firmware automatically (as configured above)
1489 * so remove it from the base beacon template to avoid duplicate P2P
1490 * IEs in beacon frames.
1491 */
1492 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1493 offsetof(struct ieee80211_mgmt,
1494 u.beacon.variable));
1495
1496 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1497 0, NULL, 0);
1498 kfree_skb(bcn);
1499
1500 if (ret) {
1501 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1502 ret);
1503 return ret;
1504 }
1505
1506 return 0;
1507 }
1508
1509 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1510 {
1511 struct ath10k *ar = arvif->ar;
1512 struct ieee80211_hw *hw = ar->hw;
1513 struct ieee80211_vif *vif = arvif->vif;
1514 struct sk_buff *prb;
1515 int ret;
1516
1517 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1518 return 0;
1519
1520 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1521 return 0;
1522
1523 prb = ieee80211_proberesp_get(hw, vif);
1524 if (!prb) {
1525 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1526 return -EPERM;
1527 }
1528
1529 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1530 kfree_skb(prb);
1531
1532 if (ret) {
1533 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1534 ret);
1535 return ret;
1536 }
1537
1538 return 0;
1539 }
1540
1541 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1542 {
1543 struct ath10k *ar = arvif->ar;
1544 struct cfg80211_chan_def def;
1545 int ret;
1546
1547 	/* When the vdev is originally started during assign_vif_chanctx() some
1548 * information is missing, notably SSID. Firmware revisions with beacon
1549 * offloading require the SSID to be provided during vdev (re)start to
1550 * handle hidden SSID properly.
1551 *
1552 * Vdev restart must be done after vdev has been both started and
1553 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1554 * deliver vdev restart response event causing timeouts during vdev
1555 * syncing in ath10k.
1556 *
1557 * Note: The vdev down/up and template reinstallation could be skipped
1558 	 * since only wmi-tlv firmware is known to have beacon offload and
1559 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1560 * response delivery. It's probably more robust to keep it as is.
1561 */
1562 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1563 return 0;
1564
1565 if (WARN_ON(!arvif->is_started))
1566 return -EINVAL;
1567
1568 if (WARN_ON(!arvif->is_up))
1569 return -EINVAL;
1570
1571 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1572 return -EINVAL;
1573
1574 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1575 if (ret) {
1576 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1577 arvif->vdev_id, ret);
1578 return ret;
1579 }
1580
1581 	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
1582 * firmware will crash upon vdev up.
1583 */
1584
1585 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1586 if (ret) {
1587 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1588 return ret;
1589 }
1590
1591 ret = ath10k_mac_setup_prb_tmpl(arvif);
1592 if (ret) {
1593 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1594 return ret;
1595 }
1596
1597 ret = ath10k_vdev_restart(arvif, &def);
1598 if (ret) {
1599 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1600 arvif->vdev_id, ret);
1601 return ret;
1602 }
1603
1604 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1605 arvif->bssid);
1606 if (ret) {
1607 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1608 arvif->vdev_id, ret);
1609 return ret;
1610 }
1611
1612 return 0;
1613 }
1614
1615 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1616 struct ieee80211_bss_conf *info)
1617 {
1618 struct ath10k *ar = arvif->ar;
1619 int ret = 0;
1620
1621 lockdep_assert_held(&arvif->ar->conf_mutex);
1622
1623 if (!info->enable_beacon) {
1624 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1625 if (ret)
1626 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1627 arvif->vdev_id, ret);
1628
1629 arvif->is_up = false;
1630
1631 spin_lock_bh(&arvif->ar->data_lock);
1632 ath10k_mac_vif_beacon_free(arvif);
1633 spin_unlock_bh(&arvif->ar->data_lock);
1634
1635 return;
1636 }
1637
1638 arvif->tx_seq_no = 0x1000;
1639
1640 arvif->aid = 0;
1641 ether_addr_copy(arvif->bssid, info->bssid);
1642
1643 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1644 arvif->bssid);
1645 if (ret) {
1646 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1647 arvif->vdev_id, ret);
1648 return;
1649 }
1650
1651 arvif->is_up = true;
1652
1653 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1654 if (ret) {
1655 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1656 arvif->vdev_id, ret);
1657 return;
1658 }
1659
1660 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1661 }
1662
1663 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1664 struct ieee80211_bss_conf *info,
1665 const u8 self_peer[ETH_ALEN])
1666 {
1667 struct ath10k *ar = arvif->ar;
1668 u32 vdev_param;
1669 int ret = 0;
1670
1671 lockdep_assert_held(&arvif->ar->conf_mutex);
1672
1673 if (!info->ibss_joined) {
1674 if (is_zero_ether_addr(arvif->bssid))
1675 return;
1676
1677 eth_zero_addr(arvif->bssid);
1678
1679 return;
1680 }
1681
1682 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1683 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1684 ATH10K_DEFAULT_ATIM);
1685 if (ret)
1686 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1687 arvif->vdev_id, ret);
1688 }
1689
1690 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1691 {
1692 struct ath10k *ar = arvif->ar;
1693 u32 param;
1694 u32 value;
1695 int ret;
1696
1697 lockdep_assert_held(&arvif->ar->conf_mutex);
1698
1699 if (arvif->u.sta.uapsd)
1700 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1701 else
1702 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1703
1704 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1705 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1706 if (ret) {
1707 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1708 value, arvif->vdev_id, ret);
1709 return ret;
1710 }
1711
1712 return 0;
1713 }
1714
1715 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1716 {
1717 struct ath10k *ar = arvif->ar;
1718 u32 param;
1719 u32 value;
1720 int ret;
1721
1722 lockdep_assert_held(&arvif->ar->conf_mutex);
1723
1724 if (arvif->u.sta.uapsd)
1725 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1726 else
1727 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1728
1729 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1730 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1731 param, value);
1732 if (ret) {
1733 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1734 value, arvif->vdev_id, ret);
1735 return ret;
1736 }
1737
1738 return 0;
1739 }
1740
1741 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1742 {
1743 struct ath10k_vif *arvif;
1744 int num = 0;
1745
1746 lockdep_assert_held(&ar->conf_mutex);
1747
1748 list_for_each_entry(arvif, &ar->arvifs, list)
1749 if (arvif->is_started)
1750 num++;
1751
1752 return num;
1753 }
1754
1755 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1756 {
1757 struct ath10k *ar = arvif->ar;
1758 struct ieee80211_vif *vif = arvif->vif;
1759 struct ieee80211_conf *conf = &ar->hw->conf;
1760 enum wmi_sta_powersave_param param;
1761 enum wmi_sta_ps_mode psmode;
1762 int ret;
1763 int ps_timeout;
1764 bool enable_ps;
1765
1766 lockdep_assert_held(&arvif->ar->conf_mutex);
1767
1768 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1769 return 0;
1770
1771 enable_ps = arvif->ps;
1772
1773 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1774 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1775 ar->running_fw->fw_file.fw_features)) {
1776 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1777 arvif->vdev_id);
1778 enable_ps = false;
1779 }
1780
1781 if (!arvif->is_started) {
1782 /* mac80211 can update vif powersave state while disconnected.
1783 * Firmware doesn't behave nicely and consumes more power than
1784 * necessary if PS is disabled on a non-started vdev. Hence
1785 * force-enable PS for non-running vdevs.
1786 */
1787 psmode = WMI_STA_PS_MODE_ENABLED;
1788 } else if (enable_ps) {
1789 psmode = WMI_STA_PS_MODE_ENABLED;
1790 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1791
1792 ps_timeout = conf->dynamic_ps_timeout;
1793 if (ps_timeout == 0) {
1794 /* Firmware doesn't like 0 */
1795 ps_timeout = ieee80211_tu_to_usec(
1796 vif->bss_conf.beacon_int) / 1000;
1797 }
1798
1799 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1800 ps_timeout);
1801 if (ret) {
1802 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1803 arvif->vdev_id, ret);
1804 return ret;
1805 }
1806 } else {
1807 psmode = WMI_STA_PS_MODE_DISABLED;
1808 }
1809
1810 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1811 arvif->vdev_id, psmode ? "enable" : "disable");
1812
1813 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1814 if (ret) {
1815 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1816 psmode, arvif->vdev_id, ret);
1817 return ret;
1818 }
1819
1820 return 0;
1821 }
1822
1823 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1824 {
1825 struct ath10k *ar = arvif->ar;
1826 struct wmi_sta_keepalive_arg arg = {};
1827 int ret;
1828
1829 lockdep_assert_held(&arvif->ar->conf_mutex);
1830
1831 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1832 return 0;
1833
1834 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1835 return 0;
1836
1837 /* Some firmware revisions have a bug and ignore the `enabled` field.
1838 * Instead use the interval to disable the keepalive.
1839 */
1840 arg.vdev_id = arvif->vdev_id;
1841 arg.enabled = 1;
1842 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1843 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1844
1845 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1846 if (ret) {
1847 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1848 arvif->vdev_id, ret);
1849 return ret;
1850 }
1851
1852 return 0;
1853 }
1854
1855 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1856 {
1857 struct ath10k *ar = arvif->ar;
1858 struct ieee80211_vif *vif = arvif->vif;
1859 int ret;
1860
1861 lockdep_assert_held(&arvif->ar->conf_mutex);
1862
1863 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1864 return;
1865
1866 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1867 return;
1868
1869 if (!vif->csa_active)
1870 return;
1871
1872 if (!arvif->is_up)
1873 return;
1874
1875 if (!ieee80211_csa_is_complete(vif)) {
1876 ieee80211_csa_update_counter(vif);
1877
1878 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1879 if (ret)
1880 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1881 ret);
1882
1883 ret = ath10k_mac_setup_prb_tmpl(arvif);
1884 if (ret)
1885 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1886 ret);
1887 } else {
1888 ieee80211_csa_finish(vif);
1889 }
1890 }
1891
1892 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1893 {
1894 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1895 ap_csa_work);
1896 struct ath10k *ar = arvif->ar;
1897
1898 mutex_lock(&ar->conf_mutex);
1899 ath10k_mac_vif_ap_csa_count_down(arvif);
1900 mutex_unlock(&ar->conf_mutex);
1901 }
1902
1903 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1904 struct ieee80211_vif *vif)
1905 {
1906 struct sk_buff *skb = data;
1907 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1908 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1909
1910 if (vif->type != NL80211_IFTYPE_STATION)
1911 return;
1912
1913 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1914 return;
1915
1916 cancel_delayed_work(&arvif->connection_loss_work);
1917 }
1918
1919 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1920 {
1921 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1922 IEEE80211_IFACE_ITER_NORMAL,
1923 ath10k_mac_handle_beacon_iter,
1924 skb);
1925 }
1926
1927 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1928 struct ieee80211_vif *vif)
1929 {
1930 u32 *vdev_id = data;
1931 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1932 struct ath10k *ar = arvif->ar;
1933 struct ieee80211_hw *hw = ar->hw;
1934
1935 if (arvif->vdev_id != *vdev_id)
1936 return;
1937
1938 if (!arvif->is_up)
1939 return;
1940
1941 ieee80211_beacon_loss(vif);
1942
1943 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
1944 * (done by mac80211) succeeds but beacons do not resume then it
1945 * doesn't make sense to continue operation. Queue connection loss work
1946 * which can be cancelled when beacon is received.
1947 */
1948 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1949 ATH10K_CONNECTION_LOSS_HZ);
1950 }
1951
1952 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
1953 {
1954 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1955 IEEE80211_IFACE_ITER_NORMAL,
1956 ath10k_mac_handle_beacon_miss_iter,
1957 &vdev_id);
1958 }
1959
1960 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
1961 {
1962 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1963 connection_loss_work.work);
1964 struct ieee80211_vif *vif = arvif->vif;
1965
1966 if (!arvif->is_up)
1967 return;
1968
1969 ieee80211_connection_loss(vif);
1970 }
1971
1972 /**********************/
1973 /* Station management */
1974 /**********************/
1975
1976 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
1977 struct ieee80211_vif *vif)
1978 {
1979 /* Some firmware revisions have unstable STA powersave when listen
1980 * interval is set too high (e.g. 5). The symptoms are firmware doesn't
1981 * generate NullFunc frames properly even if buffered frames have been
1982 * indicated in Beacon TIM. Firmware would seldom wake up to pull
1983 * buffered frames. Often pinging the device from AP would simply fail.
1984 *
1985 * As a workaround set it to 1.
1986 */
1987 if (vif->type == NL80211_IFTYPE_STATION)
1988 return 1;
1989
1990 return ar->hw->conf.listen_interval;
1991 }
1992
1993 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
1994 struct ieee80211_vif *vif,
1995 struct ieee80211_sta *sta,
1996 struct wmi_peer_assoc_complete_arg *arg)
1997 {
1998 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1999 u32 aid;
2000
2001 lockdep_assert_held(&ar->conf_mutex);
2002
2003 if (vif->type == NL80211_IFTYPE_STATION)
2004 aid = vif->bss_conf.aid;
2005 else
2006 aid = sta->aid;
2007
2008 ether_addr_copy(arg->addr, sta->addr);
2009 arg->vdev_id = arvif->vdev_id;
2010 arg->peer_aid = aid;
2011 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2012 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2013 arg->peer_num_spatial_streams = 1;
2014 arg->peer_caps = vif->bss_conf.assoc_capability;
2015 }
2016
2017 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2018 struct ieee80211_vif *vif,
2019 struct ieee80211_sta *sta,
2020 struct wmi_peer_assoc_complete_arg *arg)
2021 {
2022 struct ieee80211_bss_conf *info = &vif->bss_conf;
2023 struct cfg80211_chan_def def;
2024 struct cfg80211_bss *bss;
2025 const u8 *rsnie = NULL;
2026 const u8 *wpaie = NULL;
2027
2028 lockdep_assert_held(&ar->conf_mutex);
2029
2030 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2031 return;
2032
2033 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2034 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2035 if (bss) {
2036 const struct cfg80211_bss_ies *ies;
2037
2038 rcu_read_lock();
2039 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2040
2041 ies = rcu_dereference(bss->ies);
2042
2043 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2044 WLAN_OUI_TYPE_MICROSOFT_WPA,
2045 ies->data,
2046 ies->len);
2047 rcu_read_unlock();
2048 cfg80211_put_bss(ar->hw->wiphy, bss);
2049 }
2050
2051 	/* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
2052 if (rsnie || wpaie) {
2053 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2054 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2055 }
2056
2057 if (wpaie) {
2058 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2059 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2060 }
2061
2062 if (sta->mfp &&
2063 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2064 ar->running_fw->fw_file.fw_features)) {
2065 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2066 }
2067 }
2068
2069 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2070 struct ieee80211_vif *vif,
2071 struct ieee80211_sta *sta,
2072 struct wmi_peer_assoc_complete_arg *arg)
2073 {
2074 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2075 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2076 struct cfg80211_chan_def def;
2077 const struct ieee80211_supported_band *sband;
2078 const struct ieee80211_rate *rates;
2079 enum nl80211_band band;
2080 u32 ratemask;
2081 u8 rate;
2082 int i;
2083
2084 lockdep_assert_held(&ar->conf_mutex);
2085
2086 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2087 return;
2088
2089 band = def.chan->band;
2090 sband = ar->hw->wiphy->bands[band];
2091 ratemask = sta->supp_rates[band];
2092 ratemask &= arvif->bitrate_mask.control[band].legacy;
2093 rates = sband->bitrates;
2094
2095 rateset->num_rates = 0;
2096
2097 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2098 if (!(ratemask & 1))
2099 continue;
2100
2101 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2102 rateset->rates[rateset->num_rates] = rate;
2103 rateset->num_rates++;
2104 }
2105 }
2106
2107 static bool
2108 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2109 {
2110 int nss;
2111
2112 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2113 if (ht_mcs_mask[nss])
2114 return false;
2115
2116 return true;
2117 }
2118
2119 static bool
2120 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2121 {
2122 int nss;
2123
2124 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2125 if (vht_mcs_mask[nss])
2126 return false;
2127
2128 return true;
2129 }
2130
2131 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2132 struct ieee80211_vif *vif,
2133 struct ieee80211_sta *sta,
2134 struct wmi_peer_assoc_complete_arg *arg)
2135 {
2136 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2137 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2138 struct cfg80211_chan_def def;
2139 enum nl80211_band band;
2140 const u8 *ht_mcs_mask;
2141 const u16 *vht_mcs_mask;
2142 int i, n;
2143 u8 max_nss;
2144 u32 stbc;
2145
2146 lockdep_assert_held(&ar->conf_mutex);
2147
2148 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2149 return;
2150
2151 if (!ht_cap->ht_supported)
2152 return;
2153
2154 band = def.chan->band;
2155 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2156 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2157
2158 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2159 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2160 return;
2161
2162 arg->peer_flags |= ar->wmi.peer_flags->ht;
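/* The HT A-MPDU length exponent yields a maximum A-MPDU of
 * 2^(IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor) - 1 octets.
 */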
2163 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2164 ht_cap->ampdu_factor)) - 1;
2165
2166 arg->peer_mpdu_density =
2167 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2168
2169 arg->peer_ht_caps = ht_cap->cap;
2170 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2171
2172 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2173 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2174
2175 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2176 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2177 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2178 }
2179
2180 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2181 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2182 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2183
2184 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2185 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2186 }
2187
2188 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2189 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2190 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2191 }
2192
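/* The RX STBC subfield is a two-bit count of spatial streams the peer
 * can receive with STBC; move it into the WMI rate-caps RX STBC field.
 */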
2193 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2194 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2195 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2196 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2197 arg->peer_rate_caps |= stbc;
2198 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2199 }
2200
2201 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2202 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2203 else if (ht_cap->mcs.rx_mask[1])
2204 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2205
2206 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2207 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2208 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2209 max_nss = (i / 8) + 1;
2210 arg->peer_ht_rates.rates[n++] = i;
2211 }
2212
2213 /*
2214 * This is a workaround for HT-enabled STAs which break the spec
2215 * and have no HT capabilities RX mask (no HT RX MCS map).
2216 *
2217 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2218 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2219 *
2220 * Firmware asserts if such situation occurs.
2221 */
2222 if (n == 0) {
2223 arg->peer_ht_rates.num_rates = 8;
2224 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2225 arg->peer_ht_rates.rates[i] = i;
2226 } else {
2227 arg->peer_ht_rates.num_rates = n;
2228 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2229 }
2230
2231 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2232 arg->addr,
2233 arg->peer_ht_rates.num_rates,
2234 arg->peer_num_spatial_streams);
2235 }
2236
2237 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2238 struct ath10k_vif *arvif,
2239 struct ieee80211_sta *sta)
2240 {
2241 u32 uapsd = 0;
2242 u32 max_sp = 0;
2243 int ret = 0;
2244
2245 lockdep_assert_held(&ar->conf_mutex);
2246
2247 if (sta->wme && sta->uapsd_queues) {
2248 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2249 sta->uapsd_queues, sta->max_sp);
2250
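/* Translate the mac80211 U-APSD AC bitmap into the firmware's per-AC
 * delivery/trigger enable flags (AC0 = BE ... AC3 = VO).
 */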
2251 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2252 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2253 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2254 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2255 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2256 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2257 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2258 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2259 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2260 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2261 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2262 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2263
2264 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2265 max_sp = sta->max_sp;
2266
2267 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2268 sta->addr,
2269 WMI_AP_PS_PEER_PARAM_UAPSD,
2270 uapsd);
2271 if (ret) {
2272 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2273 arvif->vdev_id, ret);
2274 return ret;
2275 }
2276
2277 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2278 sta->addr,
2279 WMI_AP_PS_PEER_PARAM_MAX_SP,
2280 max_sp);
2281 if (ret) {
2282 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2283 arvif->vdev_id, ret);
2284 return ret;
2285 }
2286
2287 /* TODO: set this based on the STA listen interval and the
2288 * beacon interval. sta->listen_interval is not available yet
2289 * (a mac80211 patch is required), so use 10 seconds for now.
2290 */
2291 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2292 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2293 10);
2294 if (ret) {
2295 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2296 arvif->vdev_id, ret);
2297 return ret;
2298 }
2299 }
2300
2301 return 0;
2302 }
2303
2304 static u16
2305 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2306 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2307 {
2308 int idx_limit;
2309 int nss;
2310 u16 mcs_map;
2311 u16 mcs;
2312
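/* The VHT MCS map carries two bits per spatial stream: 0 = MCS 0-7,
 * 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported. Clamp each stream of
 * the advertised tx map to the user-configured per-NSS limit.
 */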
2313 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2314 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2315 vht_mcs_limit[nss];
2316
2317 if (mcs_map)
2318 idx_limit = fls(mcs_map) - 1;
2319 else
2320 idx_limit = -1;
2321
2322 switch (idx_limit) {
2323 case 0: /* fall through */
2324 case 1: /* fall through */
2325 case 2: /* fall through */
2326 case 3: /* fall through */
2327 case 4: /* fall through */
2328 case 5: /* fall through */
2329 case 6: /* fall through */
2330 default:
2331 /* see ath10k_mac_can_set_bitrate_mask() */
2332 WARN_ON(1);
2333 /* fall through */
2334 case -1:
2335 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2336 break;
2337 case 7:
2338 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2339 break;
2340 case 8:
2341 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2342 break;
2343 case 9:
2344 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2345 break;
2346 }
2347
2348 tx_mcs_set &= ~(0x3 << (nss * 2));
2349 tx_mcs_set |= mcs << (nss * 2);
2350 }
2351
2352 return tx_mcs_set;
2353 }
2354
2355 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2356 struct ieee80211_vif *vif,
2357 struct ieee80211_sta *sta,
2358 struct wmi_peer_assoc_complete_arg *arg)
2359 {
2360 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2361 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2362 struct cfg80211_chan_def def;
2363 enum nl80211_band band;
2364 const u16 *vht_mcs_mask;
2365 u8 ampdu_factor;
2366
2367 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2368 return;
2369
2370 if (!vht_cap->vht_supported)
2371 return;
2372
2373 band = def.chan->band;
2374 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2375
2376 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2377 return;
2378
2379 arg->peer_flags |= ar->wmi.peer_flags->vht;
2380
2381 if (def.chan->band == NL80211_BAND_2GHZ)
2382 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2383
2384 arg->peer_vht_caps = vht_cap->cap;
2385
2386 ampdu_factor = (vht_cap->cap &
2387 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2388 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2389
2390 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2391 * zero in VHT IE. Using it would result in degraded throughput.
2392 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2393 * it if VHT max_mpdu is smaller. */
2394 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2395 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2396 ampdu_factor)) - 1);
2397
2398 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2399 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2400
2401 arg->peer_vht_rates.rx_max_rate =
2402 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2403 arg->peer_vht_rates.rx_mcs_set =
2404 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2405 arg->peer_vht_rates.tx_max_rate =
2406 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2407 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2408 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2409
2410 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2411 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2412 }
2413
2414 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2415 struct ieee80211_vif *vif,
2416 struct ieee80211_sta *sta,
2417 struct wmi_peer_assoc_complete_arg *arg)
2418 {
2419 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2420
2421 switch (arvif->vdev_type) {
2422 case WMI_VDEV_TYPE_AP:
2423 if (sta->wme)
2424 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2425
2426 if (sta->wme && sta->uapsd_queues) {
2427 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2428 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2429 }
2430 break;
2431 case WMI_VDEV_TYPE_STA:
2432 if (vif->bss_conf.qos)
2433 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2434 break;
2435 case WMI_VDEV_TYPE_IBSS:
2436 if (sta->wme)
2437 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2438 break;
2439 default:
2440 break;
2441 }
2442
2443 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2444 sta->addr, !!(arg->peer_flags &
2445 arvif->ar->wmi.peer_flags->qos));
2446 }
2447
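/* Shifting out the four CCK rates leaves a non-zero value only if the
 * station supports at least one OFDM rate; this is used below to pick
 * 11g vs 11b phymode on 2 GHz.
 */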
2448 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2449 {
2450 return sta->supp_rates[NL80211_BAND_2GHZ] >>
2451 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2452 }
2453
2454 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2455 struct ieee80211_vif *vif,
2456 struct ieee80211_sta *sta,
2457 struct wmi_peer_assoc_complete_arg *arg)
2458 {
2459 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2460 struct cfg80211_chan_def def;
2461 enum nl80211_band band;
2462 const u8 *ht_mcs_mask;
2463 const u16 *vht_mcs_mask;
2464 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2465
2466 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2467 return;
2468
2469 band = def.chan->band;
2470 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2471 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2472
2473 switch (band) {
2474 case NL80211_BAND_2GHZ:
2475 if (sta->vht_cap.vht_supported &&
2476 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2477 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2478 phymode = MODE_11AC_VHT40;
2479 else
2480 phymode = MODE_11AC_VHT20;
2481 } else if (sta->ht_cap.ht_supported &&
2482 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2483 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2484 phymode = MODE_11NG_HT40;
2485 else
2486 phymode = MODE_11NG_HT20;
2487 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2488 phymode = MODE_11G;
2489 } else {
2490 phymode = MODE_11B;
2491 }
2492
2493 break;
2494 case NL80211_BAND_5GHZ:
2495 /*
2496 * Check VHT first.
2497 */
2498 if (sta->vht_cap.vht_supported &&
2499 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2500 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2501 phymode = MODE_11AC_VHT80;
2502 else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2503 phymode = MODE_11AC_VHT40;
2504 else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2505 phymode = MODE_11AC_VHT20;
2506 } else if (sta->ht_cap.ht_supported &&
2507 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2508 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2509 phymode = MODE_11NA_HT40;
2510 else
2511 phymode = MODE_11NA_HT20;
2512 } else {
2513 phymode = MODE_11A;
2514 }
2515
2516 break;
2517 default:
2518 break;
2519 }
2520
2521 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2522 sta->addr, ath10k_wmi_phymode_str(phymode));
2523
2524 arg->peer_phymode = phymode;
2525 WARN_ON(phymode == MODE_UNKNOWN);
2526 }
2527
2528 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2529 struct ieee80211_vif *vif,
2530 struct ieee80211_sta *sta,
2531 struct wmi_peer_assoc_complete_arg *arg)
2532 {
2533 lockdep_assert_held(&ar->conf_mutex);
2534
2535 memset(arg, 0, sizeof(*arg));
2536
2537 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2538 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2539 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2540 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2541 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2542 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2543 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2544
2545 return 0;
2546 }
2547
2548 static const u32 ath10k_smps_map[] = {
2549 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2550 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2551 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2552 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2553 };
2554
2555 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2556 const u8 *addr,
2557 const struct ieee80211_sta_ht_cap *ht_cap)
2558 {
2559 int smps;
2560
2561 if (!ht_cap->ht_supported)
2562 return 0;
2563
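/* The SM power save mode is carried in the HT capability info field;
 * translate it into the firmware SMPS states via ath10k_smps_map[].
 */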
2564 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2565 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2566
2567 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2568 return -EINVAL;
2569
2570 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2571 WMI_PEER_SMPS_STATE,
2572 ath10k_smps_map[smps]);
2573 }
2574
2575 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2576 struct ieee80211_vif *vif,
2577 struct ieee80211_sta_vht_cap vht_cap)
2578 {
2579 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2580 int ret;
2581 u32 param;
2582 u32 value;
2583
2584 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2585 return 0;
2586
2587 if (!(ar->vht_cap_info &
2588 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2589 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2590 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2591 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2592 return 0;
2593
2594 param = ar->wmi.vdev_param->txbf;
2595 value = 0;
2596
2597 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2598 return 0;
2599
2600 /* The following logic is correct: if a remote STA advertises support
2601 * for acting as a beamformer then we should enable ourselves as a beamformee.
2602 */
2603
2604 if (ar->vht_cap_info &
2605 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2606 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2607 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2608 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2609
2610 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2611 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2612 }
2613
2614 if (ar->vht_cap_info &
2615 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2616 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2617 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2618 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2619
2620 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2621 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2622 }
2623
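/* MU beamforming presumes SU support, so make sure the SU flag
 * accompanies the corresponding MU flag.
 */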
2624 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2625 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2626
2627 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2628 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2629
2630 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2631 if (ret) {
2632 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2633 value, ret);
2634 return ret;
2635 }
2636
2637 return 0;
2638 }
2639
2640 /* can be called only in mac80211 callbacks due to `key_count` usage */
2641 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2642 struct ieee80211_vif *vif,
2643 struct ieee80211_bss_conf *bss_conf)
2644 {
2645 struct ath10k *ar = hw->priv;
2646 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2647 struct ieee80211_sta_ht_cap ht_cap;
2648 struct ieee80211_sta_vht_cap vht_cap;
2649 struct wmi_peer_assoc_complete_arg peer_arg;
2650 struct ieee80211_sta *ap_sta;
2651 int ret;
2652
2653 lockdep_assert_held(&ar->conf_mutex);
2654
2655 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2656 arvif->vdev_id, arvif->bssid, arvif->aid);
2657
2658 rcu_read_lock();
2659
2660 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2661 if (!ap_sta) {
2662 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2663 bss_conf->bssid, arvif->vdev_id);
2664 rcu_read_unlock();
2665 return;
2666 }
2667
2668 /* ap_sta must be accessed only within the RCU section, which must be
2669 * left before calling ath10k_setup_peer_smps() as it might sleep. */
2670 ht_cap = ap_sta->ht_cap;
2671 vht_cap = ap_sta->vht_cap;
2672
2673 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2674 if (ret) {
2675 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2676 bss_conf->bssid, arvif->vdev_id, ret);
2677 rcu_read_unlock();
2678 return;
2679 }
2680
2681 rcu_read_unlock();
2682
2683 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2684 if (ret) {
2685 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2686 bss_conf->bssid, arvif->vdev_id, ret);
2687 return;
2688 }
2689
2690 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2691 if (ret) {
2692 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2693 arvif->vdev_id, ret);
2694 return;
2695 }
2696
2697 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2698 if (ret) {
2699 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2700 arvif->vdev_id, bss_conf->bssid, ret);
2701 return;
2702 }
2703
2704 ath10k_dbg(ar, ATH10K_DBG_MAC,
2705 "mac vdev %d up (associated) bssid %pM aid %d\n",
2706 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2707
2708 WARN_ON(arvif->is_up);
2709
2710 arvif->aid = bss_conf->aid;
2711 ether_addr_copy(arvif->bssid, bss_conf->bssid);
2712
2713 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2714 if (ret) {
2715 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2716 arvif->vdev_id, ret);
2717 return;
2718 }
2719
2720 arvif->is_up = true;
2721
2722 /* Workaround: Some firmware revisions (tested with qca6174
2723 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
2724 * poked with a peer param command.
2725 */
2726 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2727 WMI_PEER_DUMMY_VAR, 1);
2728 if (ret) {
2729 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2730 arvif->bssid, arvif->vdev_id, ret);
2731 return;
2732 }
2733 }
2734
2735 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2736 struct ieee80211_vif *vif)
2737 {
2738 struct ath10k *ar = hw->priv;
2739 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2740 struct ieee80211_sta_vht_cap vht_cap = {};
2741 int ret;
2742
2743 lockdep_assert_held(&ar->conf_mutex);
2744
2745 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2746 arvif->vdev_id, arvif->bssid);
2747
2748 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2749 if (ret)
2750 ath10k_warn(ar, "faield to down vdev %i: %d\n",
2751 arvif->vdev_id, ret);
2752
2753 arvif->def_wep_key_idx = -1;
2754
2755 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2756 if (ret) {
2757 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2758 arvif->vdev_id, ret);
2759 return;
2760 }
2761
2762 arvif->is_up = false;
2763
2764 cancel_delayed_work_sync(&arvif->connection_loss_work);
2765 }
2766
2767 static int ath10k_station_assoc(struct ath10k *ar,
2768 struct ieee80211_vif *vif,
2769 struct ieee80211_sta *sta,
2770 bool reassoc)
2771 {
2772 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2773 struct wmi_peer_assoc_complete_arg peer_arg;
2774 int ret = 0;
2775
2776 lockdep_assert_held(&ar->conf_mutex);
2777
2778 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2779 if (ret) {
2780 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2781 sta->addr, arvif->vdev_id, ret);
2782 return ret;
2783 }
2784
2785 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2786 if (ret) {
2787 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2788 sta->addr, arvif->vdev_id, ret);
2789 return ret;
2790 }
2791
2792 /* Re-assoc is run only to update the supported rates for a given
2793 * station. It doesn't make much sense to reconfigure the peer completely.
2794 */
2795 if (!reassoc) {
2796 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2797 &sta->ht_cap);
2798 if (ret) {
2799 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2800 arvif->vdev_id, ret);
2801 return ret;
2802 }
2803
2804 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2805 if (ret) {
2806 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2807 sta->addr, arvif->vdev_id, ret);
2808 return ret;
2809 }
2810
2811 if (!sta->wme) {
2812 arvif->num_legacy_stations++;
2813 ret = ath10k_recalc_rtscts_prot(arvif);
2814 if (ret) {
2815 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2816 arvif->vdev_id, ret);
2817 return ret;
2818 }
2819 }
2820
2821 /* Plumb cached keys only for static WEP */
2822 if (arvif->def_wep_key_idx != -1) {
2823 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2824 if (ret) {
2825 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2826 arvif->vdev_id, ret);
2827 return ret;
2828 }
2829 }
2830 }
2831
2832 return ret;
2833 }
2834
2835 static int ath10k_station_disassoc(struct ath10k *ar,
2836 struct ieee80211_vif *vif,
2837 struct ieee80211_sta *sta)
2838 {
2839 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2840 int ret = 0;
2841
2842 lockdep_assert_held(&ar->conf_mutex);
2843
2844 if (!sta->wme) {
2845 arvif->num_legacy_stations--;
2846 ret = ath10k_recalc_rtscts_prot(arvif);
2847 if (ret) {
2848 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2849 arvif->vdev_id, ret);
2850 return ret;
2851 }
2852 }
2853
2854 ret = ath10k_clear_peer_keys(arvif, sta->addr);
2855 if (ret) {
2856 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2857 arvif->vdev_id, ret);
2858 return ret;
2859 }
2860
2861 return ret;
2862 }
2863
2864 /**************/
2865 /* Regulatory */
2866 /**************/
2867
2868 static int ath10k_update_channel_list(struct ath10k *ar)
2869 {
2870 struct ieee80211_hw *hw = ar->hw;
2871 struct ieee80211_supported_band **bands;
2872 enum nl80211_band band;
2873 struct ieee80211_channel *channel;
2874 struct wmi_scan_chan_list_arg arg = {0};
2875 struct wmi_channel_arg *ch;
2876 bool passive;
2877 int len;
2878 int ret;
2879 int i;
2880
2881 lockdep_assert_held(&ar->conf_mutex);
2882
2883 bands = hw->wiphy->bands;
2884 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2885 if (!bands[band])
2886 continue;
2887
2888 for (i = 0; i < bands[band]->n_channels; i++) {
2889 if (bands[band]->channels[i].flags &
2890 IEEE80211_CHAN_DISABLED)
2891 continue;
2892
2893 arg.n_channels++;
2894 }
2895 }
2896
2897 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2898 arg.channels = kzalloc(len, GFP_KERNEL);
2899 if (!arg.channels)
2900 return -ENOMEM;
2901
2902 ch = arg.channels;
2903 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2904 if (!bands[band])
2905 continue;
2906
2907 for (i = 0; i < bands[band]->n_channels; i++) {
2908 channel = &bands[band]->channels[i];
2909
2910 if (channel->flags & IEEE80211_CHAN_DISABLED)
2911 continue;
2912
2913 ch->allow_ht = true;
2914
2915 /* FIXME: when should we really allow VHT? */
2916 ch->allow_vht = true;
2917
2918 ch->allow_ibss =
2919 !(channel->flags & IEEE80211_CHAN_NO_IR);
2920
2921 ch->ht40plus =
2922 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2923
2924 ch->chan_radar =
2925 !!(channel->flags & IEEE80211_CHAN_RADAR);
2926
2927 passive = channel->flags & IEEE80211_CHAN_NO_IR;
2928 ch->passive = passive;
2929
2930 ch->freq = channel->center_freq;
2931 ch->band_center_freq1 = channel->center_freq;
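/* mac80211 reports power limits in dBm while the firmware appears to
 * expect 0.5 dBm units, hence the multiplication by 2 below.
 */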
2932 ch->min_power = 0;
2933 ch->max_power = channel->max_power * 2;
2934 ch->max_reg_power = channel->max_reg_power * 2;
2935 ch->max_antenna_gain = channel->max_antenna_gain * 2;
2936 ch->reg_class_id = 0; /* FIXME */
2937
2938 /* FIXME: why use only legacy modes, why not any
2939 * HT/VHT modes? Would that even make any
2940 * difference? */
2941 if (channel->band == NL80211_BAND_2GHZ)
2942 ch->mode = MODE_11G;
2943 else
2944 ch->mode = MODE_11A;
2945
2946 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2947 continue;
2948
2949 ath10k_dbg(ar, ATH10K_DBG_WMI,
2950 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2951 ch - arg.channels, arg.n_channels,
2952 ch->freq, ch->max_power, ch->max_reg_power,
2953 ch->max_antenna_gain, ch->mode);
2954
2955 ch++;
2956 }
2957 }
2958
2959 ret = ath10k_wmi_scan_chan_list(ar, &arg);
2960 kfree(arg.channels);
2961
2962 return ret;
2963 }
2964
2965 static enum wmi_dfs_region
2966 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
2967 {
2968 switch (dfs_region) {
2969 case NL80211_DFS_UNSET:
2970 return WMI_UNINIT_DFS_DOMAIN;
2971 case NL80211_DFS_FCC:
2972 return WMI_FCC_DFS_DOMAIN;
2973 case NL80211_DFS_ETSI:
2974 return WMI_ETSI_DFS_DOMAIN;
2975 case NL80211_DFS_JP:
2976 return WMI_MKK4_DFS_DOMAIN;
2977 }
2978 return WMI_UNINIT_DFS_DOMAIN;
2979 }
2980
2981 static void ath10k_regd_update(struct ath10k *ar)
2982 {
2983 struct reg_dmn_pair_mapping *regpair;
2984 int ret;
2985 enum wmi_dfs_region wmi_dfs_reg;
2986 enum nl80211_dfs_regions nl_dfs_reg;
2987
2988 lockdep_assert_held(&ar->conf_mutex);
2989
2990 ret = ath10k_update_channel_list(ar);
2991 if (ret)
2992 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
2993
2994 regpair = ar->ath_common.regulatory.regpair;
2995
2996 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
2997 nl_dfs_reg = ar->dfs_detector->region;
2998 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
2999 } else {
3000 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3001 }
3002
3003 /* The target allows setting up a per-band regdomain but ath_common
3004 * provides only a combined one */
3005 ret = ath10k_wmi_pdev_set_regdomain(ar,
3006 regpair->reg_domain,
3007 regpair->reg_domain, /* 2ghz */
3008 regpair->reg_domain, /* 5ghz */
3009 regpair->reg_2ghz_ctl,
3010 regpair->reg_5ghz_ctl,
3011 wmi_dfs_reg);
3012 if (ret)
3013 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3014 }
3015
3016 static void ath10k_reg_notifier(struct wiphy *wiphy,
3017 struct regulatory_request *request)
3018 {
3019 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3020 struct ath10k *ar = hw->priv;
3021 bool result;
3022
3023 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3024
3025 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3026 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3027 request->dfs_region);
3028 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3029 request->dfs_region);
3030 if (!result)
3031 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3032 request->dfs_region);
3033 }
3034
3035 mutex_lock(&ar->conf_mutex);
3036 if (ar->state == ATH10K_STATE_ON)
3037 ath10k_regd_update(ar);
3038 mutex_unlock(&ar->conf_mutex);
3039 }
3040
3041 /***************/
3042 /* TX handlers */
3043 /***************/
3044
3045 enum ath10k_mac_tx_path {
3046 ATH10K_MAC_TX_HTT,
3047 ATH10K_MAC_TX_HTT_MGMT,
3048 ATH10K_MAC_TX_WMI_MGMT,
3049 ATH10K_MAC_TX_UNKNOWN,
3050 };
3051
3052 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3053 {
3054 lockdep_assert_held(&ar->htt.tx_lock);
3055
3056 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3057 ar->tx_paused |= BIT(reason);
3058 ieee80211_stop_queues(ar->hw);
3059 }
3060
3061 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3062 struct ieee80211_vif *vif)
3063 {
3064 struct ath10k *ar = data;
3065 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3066
3067 if (arvif->tx_paused)
3068 return;
3069
3070 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3071 }
3072
3073 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3074 {
3075 lockdep_assert_held(&ar->htt.tx_lock);
3076
3077 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3078 ar->tx_paused &= ~BIT(reason);
3079
3080 if (ar->tx_paused)
3081 return;
3082
3083 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3084 IEEE80211_IFACE_ITER_RESUME_ALL,
3085 ath10k_mac_tx_unlock_iter,
3086 ar);
3087
3088 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3089 }
3090
3091 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3092 {
3093 struct ath10k *ar = arvif->ar;
3094
3095 lockdep_assert_held(&ar->htt.tx_lock);
3096
3097 WARN_ON(reason >= BITS_PER_LONG);
3098 arvif->tx_paused |= BIT(reason);
3099 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3100 }
3101
3102 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3103 {
3104 struct ath10k *ar = arvif->ar;
3105
3106 lockdep_assert_held(&ar->htt.tx_lock);
3107
3108 WARN_ON(reason >= BITS_PER_LONG);
3109 arvif->tx_paused &= ~BIT(reason);
3110
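/* Wake the vdev queue only once both the global and the per-vif
 * pause bitmaps are clear.
 */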
3111 if (ar->tx_paused)
3112 return;
3113
3114 if (arvif->tx_paused)
3115 return;
3116
3117 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3118 }
3119
3120 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3121 enum wmi_tlv_tx_pause_id pause_id,
3122 enum wmi_tlv_tx_pause_action action)
3123 {
3124 struct ath10k *ar = arvif->ar;
3125
3126 lockdep_assert_held(&ar->htt.tx_lock);
3127
3128 switch (action) {
3129 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3130 ath10k_mac_vif_tx_lock(arvif, pause_id);
3131 break;
3132 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3133 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3134 break;
3135 default:
3136 ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3137 action, arvif->vdev_id);
3138 break;
3139 }
3140 }
3141
3142 struct ath10k_mac_tx_pause {
3143 u32 vdev_id;
3144 enum wmi_tlv_tx_pause_id pause_id;
3145 enum wmi_tlv_tx_pause_action action;
3146 };
3147
3148 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3149 struct ieee80211_vif *vif)
3150 {
3151 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3152 struct ath10k_mac_tx_pause *arg = data;
3153
3154 if (arvif->vdev_id != arg->vdev_id)
3155 return;
3156
3157 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3158 }
3159
3160 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3161 enum wmi_tlv_tx_pause_id pause_id,
3162 enum wmi_tlv_tx_pause_action action)
3163 {
3164 struct ath10k_mac_tx_pause arg = {
3165 .vdev_id = vdev_id,
3166 .pause_id = pause_id,
3167 .action = action,
3168 };
3169
3170 spin_lock_bh(&ar->htt.tx_lock);
3171 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3172 IEEE80211_IFACE_ITER_RESUME_ALL,
3173 ath10k_mac_handle_tx_pause_iter,
3174 &arg);
3175 spin_unlock_bh(&ar->htt.tx_lock);
3176 }
3177
3178 static enum ath10k_hw_txrx_mode
3179 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3180 struct ieee80211_vif *vif,
3181 struct ieee80211_sta *sta,
3182 struct sk_buff *skb)
3183 {
3184 const struct ieee80211_hdr *hdr = (void *)skb->data;
3185 __le16 fc = hdr->frame_control;
3186
3187 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3188 return ATH10K_HW_TXRX_RAW;
3189
3190 if (ieee80211_is_mgmt(fc))
3191 return ATH10K_HW_TXRX_MGMT;
3192
3193 /* Workaround:
3194 *
3195 * NullFunc frames are mostly used to ping whether a client or AP is
3196 * still reachable and responsive. This implies tx status reports must
3197 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
3198 * may conclude that the other end disappeared and tear down the BSS
3199 * connection, or it may never disconnect from the BSS/client (which is
3200 * the case).
3201 *
3202 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3203 * NullFunc frames to the driver. However there is an HTT Mgmt Tx
3204 * command which seems to deliver correct tx reports for NullFunc
3205 * frames. The downside of using it is that it ignores client powersave
3206 * state, so it can end up disconnecting sleeping clients in AP mode.
3207 * It should fix STA mode though, because APs don't sleep.
3208 */
3209 if (ar->htt.target_version_major < 3 &&
3210 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3211 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3212 ar->running_fw->fw_file.fw_features))
3213 return ATH10K_HW_TXRX_MGMT;
3214
3215 /* Workaround:
3216 *
3217 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3218 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3219 * to work with Ethernet txmode so use it.
3220 *
3221 * FIXME: Check if raw mode works with TDLS.
3222 */
3223 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3224 return ATH10K_HW_TXRX_ETHERNET;
3225
3226 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3227 return ATH10K_HW_TXRX_RAW;
3228
3229 return ATH10K_HW_TXRX_NATIVE_WIFI;
3230 }
3231
3232 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3233 struct sk_buff *skb)
3234 {
3235 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3236 const struct ieee80211_hdr *hdr = (void *)skb->data;
3237 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3238 IEEE80211_TX_CTL_INJECTED;
3239
3240 if (!ieee80211_has_protected(hdr->frame_control))
3241 return false;
3242
3243 if ((info->flags & mask) == mask)
3244 return false;
3245
3246 if (vif)
3247 return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3248
3249 return true;
3250 }
3251
3252 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3253 * Control in the header.
3254 */
3255 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3256 {
3257 struct ieee80211_hdr *hdr = (void *)skb->data;
3258 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3259 u8 *qos_ctl;
3260
3261 if (!ieee80211_is_data_qos(hdr->frame_control))
3262 return;
3263
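/* Strip the QoS Control field: shift everything preceding it towards
 * the tail by two bytes, then trim the now unused front of the skb.
 */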
3264 qos_ctl = ieee80211_get_qos_ctl(hdr);
3265 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3266 skb->data, (void *)qos_ctl - (void *)skb->data);
3267 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3268
3269 /* Some firmware revisions don't handle sending QoS NullFunc well.
3270 * These frames are mainly used for CQM purposes so it doesn't really
3271 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3272 */
3273 hdr = (void *)skb->data;
3274 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3275 cb->flags &= ~ATH10K_SKB_F_QOS;
3276
3277 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3278 }
3279
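/* Convert an 802.11 frame with an RFC 1042 (LLC/SNAP) header into an
 * 802.3 Ethernet frame for the Ethernet txmode: drop the WLAN and SNAP
 * headers, then prepend an Ethernet header rebuilt from the original
 * DA/SA and ethertype.
 */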
3280 static void ath10k_tx_h_8023(struct sk_buff *skb)
3281 {
3282 struct ieee80211_hdr *hdr;
3283 struct rfc1042_hdr *rfc1042;
3284 struct ethhdr *eth;
3285 size_t hdrlen;
3286 u8 da[ETH_ALEN];
3287 u8 sa[ETH_ALEN];
3288 __be16 type;
3289
3290 hdr = (void *)skb->data;
3291 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3292 rfc1042 = (void *)skb->data + hdrlen;
3293
3294 ether_addr_copy(da, ieee80211_get_DA(hdr));
3295 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3296 type = rfc1042->snap_type;
3297
3298 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3299 skb_push(skb, sizeof(*eth));
3300
3301 eth = (void *)skb->data;
3302 ether_addr_copy(eth->h_dest, da);
3303 ether_addr_copy(eth->h_source, sa);
3304 eth->h_proto = type;
3305 }
3306
3307 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3308 struct ieee80211_vif *vif,
3309 struct sk_buff *skb)
3310 {
3311 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3312 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3313
3314 /* This case applies only to P2P GO interfaces */
3315 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3316 return;
3317
3318 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3319 spin_lock_bh(&ar->data_lock);
3320 if (arvif->u.ap.noa_data)
3321 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3322 GFP_ATOMIC))
3323 memcpy(skb_put(skb, arvif->u.ap.noa_len),
3324 arvif->u.ap.noa_data,
3325 arvif->u.ap.noa_len);
3326 spin_unlock_bh(&ar->data_lock);
3327 }
3328 }
3329
3330 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3331 struct ieee80211_vif *vif,
3332 struct ieee80211_txq *txq,
3333 struct sk_buff *skb)
3334 {
3335 struct ieee80211_hdr *hdr = (void *)skb->data;
3336 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3337
3338 cb->flags = 0;
3339 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3340 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3341
3342 if (ieee80211_is_mgmt(hdr->frame_control))
3343 cb->flags |= ATH10K_SKB_F_MGMT;
3344
3345 if (ieee80211_is_data_qos(hdr->frame_control))
3346 cb->flags |= ATH10K_SKB_F_QOS;
3347
3348 cb->vif = vif;
3349 cb->txq = txq;
3350 }
3351
3352 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3353 {
3354 /* FIXME: Not really sure since when the behaviour changed. At some
3355 * point new firmware stopped requiring creation of peer entries for
3356 * offchannel tx (and actually creating them causes issues with wmi-htc
3357 * tx credit replenishment and reliability). Assuming it's at least 3.4
3358 * because that's when the `freq` was introduced to TX_FRM HTT command.
3359 */
3360 return (ar->htt.target_version_major >= 3 &&
3361 ar->htt.target_version_minor >= 4 &&
3362 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3363 }
3364
3365 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3366 {
3367 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3368 int ret = 0;
3369
3370 spin_lock_bh(&ar->data_lock);
3371
3372 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3373 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3374 ret = -ENOSPC;
3375 goto unlock;
3376 }
3377
3378 __skb_queue_tail(q, skb);
3379 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3380
3381 unlock:
3382 spin_unlock_bh(&ar->data_lock);
3383
3384 return ret;
3385 }
3386
3387 static enum ath10k_mac_tx_path
3388 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3389 struct sk_buff *skb,
3390 enum ath10k_hw_txrx_mode txmode)
3391 {
3392 switch (txmode) {
3393 case ATH10K_HW_TXRX_RAW:
3394 case ATH10K_HW_TXRX_NATIVE_WIFI:
3395 case ATH10K_HW_TXRX_ETHERNET:
3396 return ATH10K_MAC_TX_HTT;
3397 case ATH10K_HW_TXRX_MGMT:
3398 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3399 ar->running_fw->fw_file.fw_features))
3400 return ATH10K_MAC_TX_WMI_MGMT;
3401 else if (ar->htt.target_version_major >= 3)
3402 return ATH10K_MAC_TX_HTT;
3403 else
3404 return ATH10K_MAC_TX_HTT_MGMT;
3405 }
3406
3407 return ATH10K_MAC_TX_UNKNOWN;
3408 }
3409
3410 static int ath10k_mac_tx_submit(struct ath10k *ar,
3411 enum ath10k_hw_txrx_mode txmode,
3412 enum ath10k_mac_tx_path txpath,
3413 struct sk_buff *skb)
3414 {
3415 struct ath10k_htt *htt = &ar->htt;
3416 int ret = -EINVAL;
3417
3418 switch (txpath) {
3419 case ATH10K_MAC_TX_HTT:
3420 ret = ath10k_htt_tx(htt, txmode, skb);
3421 break;
3422 case ATH10K_MAC_TX_HTT_MGMT:
3423 ret = ath10k_htt_mgmt_tx(htt, skb);
3424 break;
3425 case ATH10K_MAC_TX_WMI_MGMT:
3426 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3427 break;
3428 case ATH10K_MAC_TX_UNKNOWN:
3429 WARN_ON_ONCE(1);
3430 ret = -EINVAL;
3431 break;
3432 }
3433
3434 if (ret) {
3435 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3436 ret);
3437 ieee80211_free_txskb(ar->hw, skb);
3438 }
3439
3440 return ret;
3441 }
3442
3443 /* This function consumes the sk_buff regardless of return value as far as
3444 * caller is concerned so no freeing is necessary afterwards.
3445 */
3446 static int ath10k_mac_tx(struct ath10k *ar,
3447 struct ieee80211_vif *vif,
3448 struct ieee80211_sta *sta,
3449 enum ath10k_hw_txrx_mode txmode,
3450 enum ath10k_mac_tx_path txpath,
3451 struct sk_buff *skb)
3452 {
3453 struct ieee80211_hw *hw = ar->hw;
3454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3455 int ret;
3456
3457 /* We should disable CCK RATE due to P2P */
3458 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3459 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3460
3461 switch (txmode) {
3462 case ATH10K_HW_TXRX_MGMT:
3463 case ATH10K_HW_TXRX_NATIVE_WIFI:
3464 ath10k_tx_h_nwifi(hw, skb);
3465 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3466 ath10k_tx_h_seq_no(vif, skb);
3467 break;
3468 case ATH10K_HW_TXRX_ETHERNET:
3469 ath10k_tx_h_8023(skb);
3470 break;
3471 case ATH10K_HW_TXRX_RAW:
3472 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3473 WARN_ON_ONCE(1);
3474 ieee80211_free_txskb(hw, skb);
3475 return -ENOTSUPP;
3476 }
3477 }
3478
3479 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3480 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3481 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
3482 skb);
3483
3484 skb_queue_tail(&ar->offchan_tx_queue, skb);
3485 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3486 return 0;
3487 }
3488 }
3489
3490 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3491 if (ret) {
3492 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3493 return ret;
3494 }
3495
3496 return 0;
3497 }
3498
3499 void ath10k_offchan_tx_purge(struct ath10k *ar)
3500 {
3501 struct sk_buff *skb;
3502
3503 for (;;) {
3504 skb = skb_dequeue(&ar->offchan_tx_queue);
3505 if (!skb)
3506 break;
3507
3508 ieee80211_free_txskb(ar->hw, skb);
3509 }
3510 }
3511
3512 void ath10k_offchan_tx_work(struct work_struct *work)
3513 {
3514 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3515 struct ath10k_peer *peer;
3516 struct ath10k_vif *arvif;
3517 enum ath10k_hw_txrx_mode txmode;
3518 enum ath10k_mac_tx_path txpath;
3519 struct ieee80211_hdr *hdr;
3520 struct ieee80211_vif *vif;
3521 struct ieee80211_sta *sta;
3522 struct sk_buff *skb;
3523 const u8 *peer_addr;
3524 int vdev_id;
3525 int ret;
3526 unsigned long time_left;
3527 bool tmp_peer_created = false;
3528
3529 /* FW requirement: We must create a peer before FW will send out
3530 * an offchannel frame. Otherwise the frame will be stuck and
3531 * never transmitted. We delete the peer upon tx completion.
3532 * It is unlikely that a peer for offchannel tx will already be
3533 * present. However it may be in some rare cases so account for that.
3534 * Otherwise we might remove a legitimate peer and break stuff. */
3535
3536 for (;;) {
3537 skb = skb_dequeue(&ar->offchan_tx_queue);
3538 if (!skb)
3539 break;
3540
3541 mutex_lock(&ar->conf_mutex);
3542
3543 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
3544 skb);
3545
3546 hdr = (struct ieee80211_hdr *)skb->data;
3547 peer_addr = ieee80211_get_DA(hdr);
3548
3549 spin_lock_bh(&ar->data_lock);
3550 vdev_id = ar->scan.vdev_id;
3551 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3552 spin_unlock_bh(&ar->data_lock);
3553
3554 if (peer)
3555 /* FIXME: should this use ath10k_warn()? */
3556 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3557 peer_addr, vdev_id);
3558
3559 if (!peer) {
3560 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3561 peer_addr,
3562 WMI_PEER_TYPE_DEFAULT);
3563 if (ret)
3564 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3565 peer_addr, vdev_id, ret);
3566 tmp_peer_created = (ret == 0);
3567 }
3568
3569 spin_lock_bh(&ar->data_lock);
3570 reinit_completion(&ar->offchan_tx_completed);
3571 ar->offchan_tx_skb = skb;
3572 spin_unlock_bh(&ar->data_lock);
3573
3574 /* It's safe to access vif and sta - conf_mutex guarantees that
3575 * sta_state() and remove_interface() are serialized with respect
3576 * to this offchannel worker.
3577 */
3578 arvif = ath10k_get_arvif(ar, vdev_id);
3579 if (arvif) {
3580 vif = arvif->vif;
3581 sta = ieee80211_find_sta(vif, peer_addr);
3582 } else {
3583 vif = NULL;
3584 sta = NULL;
3585 }
3586
3587 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3588 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3589
3590 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3591 if (ret) {
3592 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3593 ret);
3594 /* not serious */
3595 }
3596
3597 time_left =
3598 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3599 if (time_left == 0)
3600 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
3601 skb);
3602
3603 if (!peer && tmp_peer_created) {
3604 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3605 if (ret)
3606 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3607 peer_addr, vdev_id, ret);
3608 }
3609
3610 mutex_unlock(&ar->conf_mutex);
3611 }
3612 }
3613
3614 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3615 {
3616 struct sk_buff *skb;
3617
3618 for (;;) {
3619 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3620 if (!skb)
3621 break;
3622
3623 ieee80211_free_txskb(ar->hw, skb);
3624 }
3625 }
3626
3627 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3628 {
3629 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3630 struct sk_buff *skb;
3631 int ret;
3632
3633 for (;;) {
3634 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3635 if (!skb)
3636 break;
3637
3638 ret = ath10k_wmi_mgmt_tx(ar, skb);
3639 if (ret) {
3640 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3641 ret);
3642 ieee80211_free_txskb(ar->hw, skb);
3643 }
3644 }
3645 }
3646
3647 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3648 {
3649 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3650
3651 if (!txq)
3652 return;
3653
3654 INIT_LIST_HEAD(&artxq->list);
3655 }
3656
3657 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3658 {
3659 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3660 struct ath10k_skb_cb *cb;
3661 struct sk_buff *msdu;
3662 int msdu_id;
3663
3664 if (!txq)
3665 return;
3666
3667 spin_lock_bh(&ar->txqs_lock);
3668 if (!list_empty(&artxq->list))
3669 list_del_init(&artxq->list);
3670 spin_unlock_bh(&ar->txqs_lock);
3671
3672 spin_lock_bh(&ar->htt.tx_lock);
3673 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3674 cb = ATH10K_SKB_CB(msdu);
3675 if (cb->txq == txq)
3676 cb->txq = NULL;
3677 }
3678 spin_unlock_bh(&ar->htt.tx_lock);
3679 }
3680
3681 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3682 u16 peer_id,
3683 u8 tid)
3684 {
3685 struct ath10k_peer *peer;
3686
3687 lockdep_assert_held(&ar->data_lock);
3688
3689 peer = ar->peer_map[peer_id];
3690 if (!peer)
3691 return NULL;
3692
3693 if (peer->sta)
3694 return peer->sta->txq[tid];
3695 else if (peer->vif)
3696 return peer->vif->txq;
3697 else
3698 return NULL;
3699 }
3700
3701 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3702 struct ieee80211_txq *txq)
3703 {
3704 struct ath10k *ar = hw->priv;
3705 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3706
3707 /* No need to get locks */
3708
3709 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3710 return true;
3711
3712 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3713 return true;
3714
3715 if (artxq->num_fw_queued < artxq->num_push_allowed)
3716 return true;
3717
3718 return false;
3719 }
3720
3721 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3722 struct ieee80211_txq *txq)
3723 {
3724 struct ath10k *ar = hw->priv;
3725 struct ath10k_htt *htt = &ar->htt;
3726 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3727 struct ieee80211_vif *vif = txq->vif;
3728 struct ieee80211_sta *sta = txq->sta;
3729 enum ath10k_hw_txrx_mode txmode;
3730 enum ath10k_mac_tx_path txpath;
3731 struct sk_buff *skb;
3732 size_t skb_len;
3733 int ret;
3734
3735 spin_lock_bh(&ar->htt.tx_lock);
3736 ret = ath10k_htt_tx_inc_pending(htt);
3737 spin_unlock_bh(&ar->htt.tx_lock);
3738
3739 if (ret)
3740 return ret;
3741
3742 skb = ieee80211_tx_dequeue(hw, txq);
3743 if (!skb) {
3744 spin_lock_bh(&ar->htt.tx_lock);
3745 ath10k_htt_tx_dec_pending(htt);
3746 spin_unlock_bh(&ar->htt.tx_lock);
3747
3748 return -ENOENT;
3749 }
3750
3751 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3752
3753 skb_len = skb->len;
3754 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3755 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3756
3757 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3758 if (unlikely(ret)) {
3759 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3760
3761 spin_lock_bh(&ar->htt.tx_lock);
3762 ath10k_htt_tx_dec_pending(htt);
3763 spin_unlock_bh(&ar->htt.tx_lock);
3764
3765 return ret;
3766 }
3767
3768 spin_lock_bh(&ar->htt.tx_lock);
3769 artxq->num_fw_queued++;
3770 spin_unlock_bh(&ar->htt.tx_lock);
3771
3772 return skb_len;
3773 }
3774
3775 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3776 {
3777 struct ieee80211_hw *hw = ar->hw;
3778 struct ieee80211_txq *txq;
3779 struct ath10k_txq *artxq;
3780 struct ath10k_txq *last;
3781 int ret;
3782 int max;
3783
3784 spin_lock_bh(&ar->txqs_lock);
3785 rcu_read_lock();
3786
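/* Remember the current tail so at most one pass is made over the list;
 * drained queues (-ENOENT) are dropped from the list while all others
 * are rotated to the back for round-robin fairness.
 */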
3787 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3788 while (!list_empty(&ar->txqs)) {
3789 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3790 txq = container_of((void *)artxq, struct ieee80211_txq,
3791 drv_priv);
3792
3793 /* Prevent an aggressive sta/tid from taking over the tx queue */
3794 max = 16;
3795 ret = 0;
3796 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3797 ret = ath10k_mac_tx_push_txq(hw, txq);
3798 if (ret < 0)
3799 break;
3800 }
3801
3802 list_del_init(&artxq->list);
3803 if (ret != -ENOENT)
3804 list_add_tail(&artxq->list, &ar->txqs);
3805
3806 ath10k_htt_tx_txq_update(hw, txq);
3807
3808 if (artxq == last || (ret < 0 && ret != -ENOENT))
3809 break;
3810 }
3811
3812 rcu_read_unlock();
3813 spin_unlock_bh(&ar->txqs_lock);
3814 }
3815
3816 /************/
3817 /* Scanning */
3818 /************/
3819
3820 void __ath10k_scan_finish(struct ath10k *ar)
3821 {
3822 lockdep_assert_held(&ar->data_lock);
3823
3824 switch (ar->scan.state) {
3825 case ATH10K_SCAN_IDLE:
3826 break;
3827 case ATH10K_SCAN_RUNNING:
3828 case ATH10K_SCAN_ABORTING:
3829 if (!ar->scan.is_roc)
3830 ieee80211_scan_completed(ar->hw,
3831 (ar->scan.state ==
3832 ATH10K_SCAN_ABORTING));
3833 else if (ar->scan.roc_notify)
3834 ieee80211_remain_on_channel_expired(ar->hw);
3835 /* fall through */
3836 case ATH10K_SCAN_STARTING:
3837 ar->scan.state = ATH10K_SCAN_IDLE;
3838 ar->scan_channel = NULL;
3839 ar->scan.roc_freq = 0;
3840 ath10k_offchan_tx_purge(ar);
3841 cancel_delayed_work(&ar->scan.timeout);
3842 complete_all(&ar->scan.completed);
3843 break;
3844 }
3845 }
3846
3847 void ath10k_scan_finish(struct ath10k *ar)
3848 {
3849 spin_lock_bh(&ar->data_lock);
3850 __ath10k_scan_finish(ar);
3851 spin_unlock_bh(&ar->data_lock);
3852 }
3853
3854 static int ath10k_scan_stop(struct ath10k *ar)
3855 {
3856 struct wmi_stop_scan_arg arg = {
3857 .req_id = 1, /* FIXME */
3858 .req_type = WMI_SCAN_STOP_ONE,
3859 .u.scan_id = ATH10K_SCAN_ID,
3860 };
3861 int ret;
3862
3863 lockdep_assert_held(&ar->conf_mutex);
3864
3865 ret = ath10k_wmi_stop_scan(ar, &arg);
3866 if (ret) {
3867 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3868 goto out;
3869 }
3870
3871 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3872 if (ret == 0) {
3873 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3874 ret = -ETIMEDOUT;
3875 } else if (ret > 0) {
3876 ret = 0;
3877 }
3878
3879 out:
3880 /* Scan state should be updated upon scan completion but in case
3881 * firmware fails to deliver the event (for whatever reason) it is
3882 * desired to clean up scan state anyway. Firmware may have just
3883 * dropped the scan completion event delivery due to transport pipe
3884 * being overrun with data and/or it can recover on its own before the
3885 * next scan request is submitted.
3886 */
3887 spin_lock_bh(&ar->data_lock);
3888 if (ar->scan.state != ATH10K_SCAN_IDLE)
3889 __ath10k_scan_finish(ar);
3890 spin_unlock_bh(&ar->data_lock);
3891
3892 return ret;
3893 }
3894
3895 static void ath10k_scan_abort(struct ath10k *ar)
3896 {
3897 int ret;
3898
3899 lockdep_assert_held(&ar->conf_mutex);
3900
3901 spin_lock_bh(&ar->data_lock);
3902
3903 switch (ar->scan.state) {
3904 case ATH10K_SCAN_IDLE:
3905 /* This can happen if timeout worker kicked in and called
3906 * abortion while scan completion was being processed.
3907 */
3908 break;
3909 case ATH10K_SCAN_STARTING:
3910 case ATH10K_SCAN_ABORTING:
3911 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3912 ath10k_scan_state_str(ar->scan.state),
3913 ar->scan.state);
3914 break;
3915 case ATH10K_SCAN_RUNNING:
3916 ar->scan.state = ATH10K_SCAN_ABORTING;
3917 spin_unlock_bh(&ar->data_lock);
3918
3919 ret = ath10k_scan_stop(ar);
3920 if (ret)
3921 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
3922
3923 spin_lock_bh(&ar->data_lock);
3924 break;
3925 }
3926
3927 spin_unlock_bh(&ar->data_lock);
3928 }
3929
3930 void ath10k_scan_timeout_work(struct work_struct *work)
3931 {
3932 struct ath10k *ar = container_of(work, struct ath10k,
3933 scan.timeout.work);
3934
3935 mutex_lock(&ar->conf_mutex);
3936 ath10k_scan_abort(ar);
3937 mutex_unlock(&ar->conf_mutex);
3938 }
3939
3940 static int ath10k_start_scan(struct ath10k *ar,
3941 const struct wmi_start_scan_arg *arg)
3942 {
3943 int ret;
3944
3945 lockdep_assert_held(&ar->conf_mutex);
3946
3947 ret = ath10k_wmi_start_scan(ar, arg);
3948 if (ret)
3949 return ret;
3950
3951 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
3952 if (ret == 0) {
3953 ret = ath10k_scan_stop(ar);
3954 if (ret)
3955 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
3956
3957 return -ETIMEDOUT;
3958 }
3959
3960 /* If we failed to start the scan, return error code at
3961 * this point. This is probably due to some issue in the
3962 * firmware, but no need to wedge the driver due to that...
3963 */
3964 spin_lock_bh(&ar->data_lock);
3965 if (ar->scan.state == ATH10K_SCAN_IDLE) {
3966 spin_unlock_bh(&ar->data_lock);
3967 return -EINVAL;
3968 }
3969 spin_unlock_bh(&ar->data_lock);
3970
3971 return 0;
3972 }
3973
3974 /**********************/
3975 /* mac80211 callbacks */
3976 /**********************/
3977
3978 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
3979 struct ieee80211_tx_control *control,
3980 struct sk_buff *skb)
3981 {
3982 struct ath10k *ar = hw->priv;
3983 struct ath10k_htt *htt = &ar->htt;
3984 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3985 struct ieee80211_vif *vif = info->control.vif;
3986 struct ieee80211_sta *sta = control->sta;
3987 struct ieee80211_txq *txq = NULL;
3988 struct ieee80211_hdr *hdr = (void *)skb->data;
3989 enum ath10k_hw_txrx_mode txmode;
3990 enum ath10k_mac_tx_path txpath;
3991 bool is_htt;
3992 bool is_mgmt;
3993 bool is_presp;
3994 int ret;
3995
3996 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3997
3998 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3999 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4000 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4001 txpath == ATH10K_MAC_TX_HTT_MGMT);
4002 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4003
4004 if (is_htt) {
4005 spin_lock_bh(&ar->htt.tx_lock);
4006 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4007
4008 ret = ath10k_htt_tx_inc_pending(htt);
4009 if (ret) {
4010 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4011 ret);
4012 spin_unlock_bh(&ar->htt.tx_lock);
4013 ieee80211_free_txskb(ar->hw, skb);
4014 return;
4015 }
4016
4017 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4018 if (ret) {
4019 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4020 ret);
4021 ath10k_htt_tx_dec_pending(htt);
4022 spin_unlock_bh(&ar->htt.tx_lock);
4023 ieee80211_free_txskb(ar->hw, skb);
4024 return;
4025 }
4026 spin_unlock_bh(&ar->htt.tx_lock);
4027 }
4028
4029 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4030 if (ret) {
4031 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4032 if (is_htt) {
4033 spin_lock_bh(&ar->htt.tx_lock);
4034 ath10k_htt_tx_dec_pending(htt);
4035 if (is_mgmt)
4036 ath10k_htt_tx_mgmt_dec_pending(htt);
4037 spin_unlock_bh(&ar->htt.tx_lock);
4038 }
4039 return;
4040 }
4041 }
4042
4043 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4044 struct ieee80211_txq *txq)
4045 {
4046 struct ath10k *ar = hw->priv;
4047 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4048
4049 spin_lock_bh(&ar->txqs_lock);
4050 if (list_empty(&artxq->list))
4051 list_add_tail(&artxq->list, &ar->txqs);
4052 spin_unlock_bh(&ar->txqs_lock);
4053
4054 if (ath10k_mac_tx_can_push(hw, txq))
4055 tasklet_schedule(&ar->htt.txrx_compl_task);
4056
4057 ath10k_htt_tx_txq_update(hw, txq);
4058 }
4059
4060 /* Must not be called with conf_mutex held as workers can use that also. */
4061 void ath10k_drain_tx(struct ath10k *ar)
4062 {
4063 /* make sure rcu-protected mac80211 tx path itself is drained */
4064 synchronize_net();
4065
4066 ath10k_offchan_tx_purge(ar);
4067 ath10k_mgmt_over_wmi_tx_purge(ar);
4068
4069 cancel_work_sync(&ar->offchan_tx_work);
4070 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4071 }
4072
4073 void ath10k_halt(struct ath10k *ar)
4074 {
4075 struct ath10k_vif *arvif;
4076
4077 lockdep_assert_held(&ar->conf_mutex);
4078
4079 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4080 ar->filter_flags = 0;
4081 ar->monitor = false;
4082 ar->monitor_arvif = NULL;
4083
4084 if (ar->monitor_started)
4085 ath10k_monitor_stop(ar);
4086
4087 ar->monitor_started = false;
4088 ar->tx_paused = 0;
4089
4090 ath10k_scan_finish(ar);
4091 ath10k_peer_cleanup_all(ar);
4092 ath10k_core_stop(ar);
4093 ath10k_hif_power_down(ar);
4094
4095 spin_lock_bh(&ar->data_lock);
4096 list_for_each_entry(arvif, &ar->arvifs, list)
4097 ath10k_mac_vif_beacon_cleanup(arvif);
4098 spin_unlock_bh(&ar->data_lock);
4099 }
4100
4101 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4102 {
4103 struct ath10k *ar = hw->priv;
4104
4105 mutex_lock(&ar->conf_mutex);
4106
4107 *tx_ant = ar->cfg_tx_chainmask;
4108 *rx_ant = ar->cfg_rx_chainmask;
4109
4110 mutex_unlock(&ar->conf_mutex);
4111
4112 return 0;
4113 }
4114
4115 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4116 {
4117 /* It is not clear that allowing gaps in the chainmask
4118 * is helpful. It probably will not do what the user
4119 * is hoping for, so warn in that case.
4120 */
4121 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4122 return;
4123
4124 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4125 dbg, cm);
4126 }
4127
4128 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4129 {
4130 int nsts = ar->vht_cap_info;
4131
4132 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4133 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4134
4135 /* If the firmware does not deliver the number of supported space-time
4136 * streams to the host, assume it supports up to 4 BF STS and return
4137 * the value encoded in the VHT capability field (nsts - 1).
4138 */
4139 if (nsts == 0)
4140 return 3;
4141
4142 return nsts;
4143 }
4144
4145 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4146 {
4147 int sound_dim = ar->vht_cap_info;
4148
4149 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4150 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4151
4152 /* If the sounding dimension is not advertised by the firmware,
4153 * let's use a default value of 1
4154 */
4155 if (sound_dim == 0)
4156 return 1;
4157
4158 return sound_dim;
4159 }
4160
4161 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4162 {
4163 struct ieee80211_sta_vht_cap vht_cap = {0};
4164 u16 mcs_map;
4165 u32 val;
4166 int i;
4167
4168 vht_cap.vht_supported = 1;
4169 vht_cap.cap = ar->vht_cap_info;
4170
4171 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4172 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4173 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4174 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4175 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4176
4177 vht_cap.cap |= val;
4178 }
4179
4180 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4181 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4182 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4183 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4184 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4185
4186 vht_cap.cap |= val;
4187 }
4188
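/* Build the 16-bit VHT MCS map: two bits per spatial stream. Streams whose
 * chain is enabled in the configured tx chainmask advertise MCS 0-9, the
 * remaining streams are marked as not supported.
 */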
4189 mcs_map = 0;
4190 for (i = 0; i < 8; i++) {
4191 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4192 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4193 else
4194 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4195 }
4196
4197 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4198 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4199
4200 return vht_cap;
4201 }
4202
4203 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4204 {
4205 int i;
4206 struct ieee80211_sta_ht_cap ht_cap = {0};
4207
4208 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4209 return ht_cap;
4210
4211 ht_cap.ht_supported = 1;
4212 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4213 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4214 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4215 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4216 ht_cap.cap |=
4217 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4218
4219 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4220 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4221
4222 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4223 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4224
4225 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4226 u32 smps;
4227
4228 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4229 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4230
4231 ht_cap.cap |= smps;
4232 }
4233
4234 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
4235 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4236
4237 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4238 u32 stbc;
4239
4240 stbc = ar->ht_cap_info;
4241 stbc &= WMI_HT_CAP_RX_STBC;
4242 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4243 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4244 stbc &= IEEE80211_HT_CAP_RX_STBC;
4245
4246 ht_cap.cap |= stbc;
4247 }
4248
4249 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4250 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4251
4252 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4253 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4254
4255 /* max AMSDU is implicitly taken from vht_cap_info */
4256 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4257 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4258
4259 for (i = 0; i < ar->num_rf_chains; i++) {
4260 if (ar->cfg_rx_chainmask & BIT(i))
4261 ht_cap.mcs.rx_mask[i] = 0xFF;
4262 }
4263
4264 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4265
4266 return ht_cap;
4267 }
4268
4269 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4270 {
4271 struct ieee80211_supported_band *band;
4272 struct ieee80211_sta_vht_cap vht_cap;
4273 struct ieee80211_sta_ht_cap ht_cap;
4274
4275 ht_cap = ath10k_get_ht_cap(ar);
4276 vht_cap = ath10k_create_vht_cap(ar);
4277
4278 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4279 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4280 band->ht_cap = ht_cap;
4281
4282 /* Enable the VHT support at 2.4 GHz */
4283 band->vht_cap = vht_cap;
4284 }
4285 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4286 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4287 band->ht_cap = ht_cap;
4288 band->vht_cap = vht_cap;
4289 }
4290 }
4291
4292 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4293 {
4294 int ret;
4295
4296 lockdep_assert_held(&ar->conf_mutex);
4297
4298 ath10k_check_chain_mask(ar, tx_ant, "tx");
4299 ath10k_check_chain_mask(ar, rx_ant, "rx");
4300
4301 ar->cfg_tx_chainmask = tx_ant;
4302 ar->cfg_rx_chainmask = rx_ant;
4303
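/* If the firmware is not running yet, only cache the chainmasks;
 * ath10k_start() applies them once the device is brought up.
 */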
4304 if ((ar->state != ATH10K_STATE_ON) &&
4305 (ar->state != ATH10K_STATE_RESTARTED))
4306 return 0;
4307
4308 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4309 tx_ant);
4310 if (ret) {
4311 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4312 ret, tx_ant);
4313 return ret;
4314 }
4315
4316 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4317 rx_ant);
4318 if (ret) {
4319 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4320 ret, rx_ant);
4321 return ret;
4322 }
4323
4324 /* Reload HT/VHT capability */
4325 ath10k_mac_setup_ht_vht_cap(ar);
4326
4327 return 0;
4328 }
4329
4330 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4331 {
4332 struct ath10k *ar = hw->priv;
4333 int ret;
4334
4335 mutex_lock(&ar->conf_mutex);
4336 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4337 mutex_unlock(&ar->conf_mutex);
4338 return ret;
4339 }
4340
4341 static int ath10k_start(struct ieee80211_hw *hw)
4342 {
4343 struct ath10k *ar = hw->priv;
4344 u32 param;
4345 int ret = 0;
4346
4347 /*
4348 * This makes sense only when restarting hw. It is harmless to call
4349 * unconditionally. This is necessary to make sure no HTT/WMI tx
4350 * commands will be submitted while restarting.
4351 */
4352 ath10k_drain_tx(ar);
4353
4354 mutex_lock(&ar->conf_mutex);
4355
4356 switch (ar->state) {
4357 case ATH10K_STATE_OFF:
4358 ar->state = ATH10K_STATE_ON;
4359 break;
4360 case ATH10K_STATE_RESTARTING:
4361 ath10k_halt(ar);
4362 ar->state = ATH10K_STATE_RESTARTED;
4363 break;
4364 case ATH10K_STATE_ON:
4365 case ATH10K_STATE_RESTARTED:
4366 case ATH10K_STATE_WEDGED:
4367 WARN_ON(1);
4368 ret = -EINVAL;
4369 goto err;
4370 case ATH10K_STATE_UTF:
4371 ret = -EBUSY;
4372 goto err;
4373 }
4374
4375 ret = ath10k_hif_power_up(ar);
4376 if (ret) {
4377 ath10k_err(ar, "Could not init hif: %d\n", ret);
4378 goto err_off;
4379 }
4380
4381 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4382 &ar->normal_mode_fw);
4383 if (ret) {
4384 ath10k_err(ar, "Could not init core: %d\n", ret);
4385 goto err_power_down;
4386 }
4387
4388 param = ar->wmi.pdev_param->pmf_qos;
4389 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4390 if (ret) {
4391 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4392 goto err_core_stop;
4393 }
4394
4395 param = ar->wmi.pdev_param->dynamic_bw;
4396 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4397 if (ret) {
4398 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4399 goto err_core_stop;
4400 }
4401
4402 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4403 ret = ath10k_wmi_adaptive_qcs(ar, true);
4404 if (ret) {
4405 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4406 ret);
4407 goto err_core_stop;
4408 }
4409 }
4410
4411 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4412 param = ar->wmi.pdev_param->burst_enable;
4413 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4414 if (ret) {
4415 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4416 goto err_core_stop;
4417 }
4418 }
4419
4420 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4421
4422 /*
4423 * By default the FW sets the ARP frame AC to voice (6). In that case the
4424 * ARP exchange does not work properly for a UAPSD-enabled AP. ARP requests
4425 * which arrive with access category 0 are processed by the network stack
4426 * and sent back with access category 0, but the FW changes the access
4427 * category to 6. Setting the ARP frame access category to best effort (0)
4428 * solves this problem.
4429 */
4430
4431 param = ar->wmi.pdev_param->arp_ac_override;
4432 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4433 if (ret) {
4434 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4435 ret);
4436 goto err_core_stop;
4437 }
4438
4439 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4440 ar->running_fw->fw_file.fw_features)) {
4441 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4442 WMI_CCA_DETECT_LEVEL_AUTO,
4443 WMI_CCA_DETECT_MARGIN_AUTO);
4444 if (ret) {
4445 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4446 ret);
4447 goto err_core_stop;
4448 }
4449 }
4450
4451 param = ar->wmi.pdev_param->ani_enable;
4452 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4453 if (ret) {
4454 ath10k_warn(ar, "failed to enable ani by default: %d\n",
4455 ret);
4456 goto err_core_stop;
4457 }
4458
4459 ar->ani_enabled = true;
4460
4461 if (ath10k_peer_stats_enabled(ar)) {
4462 param = ar->wmi.pdev_param->peer_stats_update_period;
4463 ret = ath10k_wmi_pdev_set_param(ar, param,
4464 PEER_DEFAULT_STATS_UPDATE_PERIOD);
4465 if (ret) {
4466 ath10k_warn(ar,
4467 "failed to set peer stats period : %d\n",
4468 ret);
4469 goto err_core_stop;
4470 }
4471 }
4472
4473 ar->num_started_vdevs = 0;
4474 ath10k_regd_update(ar);
4475
4476 ath10k_spectral_start(ar);
4477 ath10k_thermal_set_throttling(ar);
4478
4479 mutex_unlock(&ar->conf_mutex);
4480 return 0;
4481
4482 err_core_stop:
4483 ath10k_core_stop(ar);
4484
4485 err_power_down:
4486 ath10k_hif_power_down(ar);
4487
4488 err_off:
4489 ar->state = ATH10K_STATE_OFF;
4490
4491 err:
4492 mutex_unlock(&ar->conf_mutex);
4493 return ret;
4494 }
4495
4496 static void ath10k_stop(struct ieee80211_hw *hw)
4497 {
4498 struct ath10k *ar = hw->priv;
4499
4500 ath10k_drain_tx(ar);
4501
4502 mutex_lock(&ar->conf_mutex);
4503 if (ar->state != ATH10K_STATE_OFF) {
4504 ath10k_halt(ar);
4505 ar->state = ATH10K_STATE_OFF;
4506 }
4507 mutex_unlock(&ar->conf_mutex);
4508
4509 cancel_delayed_work_sync(&ar->scan.timeout);
4510 cancel_work_sync(&ar->restart_work);
4511 }
4512
4513 static int ath10k_config_ps(struct ath10k *ar)
4514 {
4515 struct ath10k_vif *arvif;
4516 int ret = 0;
4517
4518 lockdep_assert_held(&ar->conf_mutex);
4519
4520 list_for_each_entry(arvif, &ar->arvifs, list) {
4521 ret = ath10k_mac_vif_setup_ps(arvif);
4522 if (ret) {
4523 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4524 break;
4525 }
4526 }
4527
4528 return ret;
4529 }
4530
4531 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4532 {
4533 int ret;
4534 u32 param;
4535
4536 lockdep_assert_held(&ar->conf_mutex);
4537
4538 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4539
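/* The WMI txpower limit parameters appear to take the value in 0.5 dBm
 * steps, hence the dBm value is multiplied by 2 below.
 */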
4540 param = ar->wmi.pdev_param->txpower_limit2g;
4541 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4542 if (ret) {
4543 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4544 txpower, ret);
4545 return ret;
4546 }
4547
4548 param = ar->wmi.pdev_param->txpower_limit5g;
4549 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4550 if (ret) {
4551 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4552 txpower, ret);
4553 return ret;
4554 }
4555
4556 return 0;
4557 }
4558
4559 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4560 {
4561 struct ath10k_vif *arvif;
4562 int ret, txpower = -1;
4563
4564 lockdep_assert_held(&ar->conf_mutex);
4565
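/* The firmware txpower limit is global (per pdev), so use the lowest
 * per-vif txpower as the limit for the whole device.
 */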
4566 list_for_each_entry(arvif, &ar->arvifs, list) {
4567 WARN_ON(arvif->txpower < 0);
4568
4569 if (txpower == -1)
4570 txpower = arvif->txpower;
4571 else
4572 txpower = min(txpower, arvif->txpower);
4573 }
4574
4575 if (WARN_ON(txpower == -1))
4576 return -EINVAL;
4577
4578 ret = ath10k_mac_txpower_setup(ar, txpower);
4579 if (ret) {
4580 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4581 txpower, ret);
4582 return ret;
4583 }
4584
4585 return 0;
4586 }
4587
4588 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4589 {
4590 struct ath10k *ar = hw->priv;
4591 struct ieee80211_conf *conf = &hw->conf;
4592 int ret = 0;
4593
4594 mutex_lock(&ar->conf_mutex);
4595
4596 if (changed & IEEE80211_CONF_CHANGE_PS)
4597 ath10k_config_ps(ar);
4598
4599 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4600 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4601 ret = ath10k_monitor_recalc(ar);
4602 if (ret)
4603 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4604 }
4605
4606 mutex_unlock(&ar->conf_mutex);
4607 return ret;
4608 }
4609
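/* Map a chainmask to a number of spatial streams. Only contiguous masks
 * are expected here (see ath10k_check_chain_mask()), e.g. 0xf -> 4,
 * 0x7 -> 3, 0x3 -> 2; anything else, including sparse masks, falls back
 * to 1.
 */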
4610 static u32 get_nss_from_chainmask(u16 chain_mask)
4611 {
4612 if ((chain_mask & 0xf) == 0xf)
4613 return 4;
4614 else if ((chain_mask & 0x7) == 0x7)
4615 return 3;
4616 else if ((chain_mask & 0x3) == 0x3)
4617 return 2;
4618 return 1;
4619 }
4620
4621 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4622 {
4623 u32 value = 0;
4624 struct ath10k *ar = arvif->ar;
4625 int nsts;
4626 int sound_dim;
4627
4628 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4629 return 0;
4630
4631 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4632 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4633 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4634 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4635
4636 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4637 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4638 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4639 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4640
4641 if (!value)
4642 return 0;
4643
4644 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4645 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4646
4647 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4648 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4649 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4650
4651 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4652 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4653
4654 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4655 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4656 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4657
4658 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4659 ar->wmi.vdev_param->txbf, value);
4660 }
4661
4662 /*
4663 * TODO:
4664 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4665 * because we will send mgmt frames without CCK. This requirement
4666 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4667 * in the TX packet.
4668 */
4669 static int ath10k_add_interface(struct ieee80211_hw *hw,
4670 struct ieee80211_vif *vif)
4671 {
4672 struct ath10k *ar = hw->priv;
4673 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4674 struct ath10k_peer *peer;
4675 enum wmi_sta_powersave_param param;
4676 int ret = 0;
4677 u32 value;
4678 int bit;
4679 int i;
4680 u32 vdev_param;
4681
4682 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4683
4684 mutex_lock(&ar->conf_mutex);
4685
4686 memset(arvif, 0, sizeof(*arvif));
4687 ath10k_mac_txq_init(vif->txq);
4688
4689 arvif->ar = ar;
4690 arvif->vif = vif;
4691
4692 INIT_LIST_HEAD(&arvif->list);
4693 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4694 INIT_DELAYED_WORK(&arvif->connection_loss_work,
4695 ath10k_mac_vif_sta_connection_loss_work);
4696
4697 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4698 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4699 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4700 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4701 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4702 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4703 }
4704
4705 if (ar->num_peers >= ar->max_num_peers) {
4706 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4707 ret = -ENOBUFS;
4708 goto err;
4709 }
4710
4711 if (ar->free_vdev_map == 0) {
4712 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4713 ret = -EBUSY;
4714 goto err;
4715 }
4716 bit = __ffs64(ar->free_vdev_map);
4717
4718 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4719 bit, ar->free_vdev_map);
4720
4721 arvif->vdev_id = bit;
4722 arvif->vdev_subtype =
4723 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4724
4725 switch (vif->type) {
4726 case NL80211_IFTYPE_P2P_DEVICE:
4727 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4728 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4729 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4730 break;
4731 case NL80211_IFTYPE_UNSPECIFIED:
4732 case NL80211_IFTYPE_STATION:
4733 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4734 if (vif->p2p)
4735 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4736 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4737 break;
4738 case NL80211_IFTYPE_ADHOC:
4739 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4740 break;
4741 case NL80211_IFTYPE_MESH_POINT:
4742 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4743 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4744 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
4745 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4746 ret = -EINVAL;
4747 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4748 goto err;
4749 }
4750 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4751 break;
4752 case NL80211_IFTYPE_AP:
4753 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4754
4755 if (vif->p2p)
4756 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4757 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
4758 break;
4759 case NL80211_IFTYPE_MONITOR:
4760 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4761 break;
4762 default:
4763 WARN_ON(1);
4764 break;
4765 }
4766
4767 /* Using vdev_id as queue number will make it very easy to do per-vif
4768 * tx queue locking. This shouldn't wrap due to interface combinations
4769 * but do a modulo for correctness' sake and to prevent using offchannel tx
4770 * queues for regular vif tx.
4771 */
4772 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4773 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4774 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4775
4776 /* Some firmware revisions don't wait for beacon tx completion before
4777 * sending another SWBA event. This could lead to hardware using old
4778 * (freed) beacon data in some cases, e.g. tx credit starvation
4779 * combined with missed TBTT. This is very rare.
4780 *
4781 * On non-IOMMU-enabled hosts this could be a possible security issue
4782 * because hw could beacon some random data on the air. On
4783 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
4784 * device would crash.
4785 *
4786 * Since there are no beacon tx completions (neither implicit nor explicit)
4787 * propagated to the host, the only workaround for this is to allocate a
4788 * DMA-coherent buffer for a lifetime of a vif and use it for all
4789 * beacon tx commands. Worst case for this approach is some beacons may
4790 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4791 */
4792 if (vif->type == NL80211_IFTYPE_ADHOC ||
4793 vif->type == NL80211_IFTYPE_MESH_POINT ||
4794 vif->type == NL80211_IFTYPE_AP) {
4795 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4796 IEEE80211_MAX_FRAME_LEN,
4797 &arvif->beacon_paddr,
4798 GFP_ATOMIC);
4799 if (!arvif->beacon_buf) {
4800 ret = -ENOMEM;
4801 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4802 ret);
4803 goto err;
4804 }
4805 }
4806 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4807 arvif->nohwcrypt = true;
4808
4809 if (arvif->nohwcrypt &&
4810 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4811 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
ret = -EINVAL;
4812 goto err;
4813 }
4814
4815 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4816 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4817 arvif->beacon_buf ? "single-buf" : "per-skb");
4818
4819 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4820 arvif->vdev_subtype, vif->addr);
4821 if (ret) {
4822 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4823 arvif->vdev_id, ret);
4824 goto err;
4825 }
4826
4827 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4828 list_add(&arvif->list, &ar->arvifs);
4829
4830 /* It makes no sense to have firmware do keepalives. mac80211 already
4831 * takes care of this with idle connection polling.
4832 */
4833 ret = ath10k_mac_vif_disable_keepalive(arvif);
4834 if (ret) {
4835 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4836 arvif->vdev_id, ret);
4837 goto err_vdev_delete;
4838 }
4839
4840 arvif->def_wep_key_idx = -1;
4841
4842 vdev_param = ar->wmi.vdev_param->tx_encap_type;
4843 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4844 ATH10K_HW_TXRX_NATIVE_WIFI);
4845 /* 10.X firmware does not support this VDEV parameter. Do not warn */
4846 if (ret && ret != -EOPNOTSUPP) {
4847 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4848 arvif->vdev_id, ret);
4849 goto err_vdev_delete;
4850 }
4851
4852 /* Configuring the number of spatial streams for a monitor interface causes
4853 * a target assert in qca9888 and qca6174.
4854 */
4855 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4856 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4857
4858 vdev_param = ar->wmi.vdev_param->nss;
4859 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4860 nss);
4861 if (ret) {
4862 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4863 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4864 ret);
4865 goto err_vdev_delete;
4866 }
4867 }
4868
4869 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4870 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4871 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4872 vif->addr, WMI_PEER_TYPE_DEFAULT);
4873 if (ret) {
4874 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4875 arvif->vdev_id, ret);
4876 goto err_vdev_delete;
4877 }
4878
4879 spin_lock_bh(&ar->data_lock);
4880
4881 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4882 if (!peer) {
4883 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4884 vif->addr, arvif->vdev_id);
4885 spin_unlock_bh(&ar->data_lock);
4886 ret = -ENOENT;
4887 goto err_peer_delete;
4888 }
4889
4890 arvif->peer_id = find_first_bit(peer->peer_ids,
4891 ATH10K_MAX_NUM_PEER_IDS);
4892
4893 spin_unlock_bh(&ar->data_lock);
4894 } else {
4895 arvif->peer_id = HTT_INVALID_PEERID;
4896 }
4897
4898 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
4899 ret = ath10k_mac_set_kickout(arvif);
4900 if (ret) {
4901 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
4902 arvif->vdev_id, ret);
4903 goto err_peer_delete;
4904 }
4905 }
4906
4907 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
4908 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
4909 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
4910 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4911 param, value);
4912 if (ret) {
4913 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
4914 arvif->vdev_id, ret);
4915 goto err_peer_delete;
4916 }
4917
4918 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
4919 if (ret) {
4920 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
4921 arvif->vdev_id, ret);
4922 goto err_peer_delete;
4923 }
4924
4925 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
4926 if (ret) {
4927 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
4928 arvif->vdev_id, ret);
4929 goto err_peer_delete;
4930 }
4931 }
4932
4933 ret = ath10k_mac_set_txbf_conf(arvif);
4934 if (ret) {
4935 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
4936 arvif->vdev_id, ret);
4937 goto err_peer_delete;
4938 }
4939
4940 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
4941 if (ret) {
4942 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
4943 arvif->vdev_id, ret);
4944 goto err_peer_delete;
4945 }
4946
4947 arvif->txpower = vif->bss_conf.txpower;
4948 ret = ath10k_mac_txpower_recalc(ar);
4949 if (ret) {
4950 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
4951 goto err_peer_delete;
4952 }
4953
4954 if (vif->type == NL80211_IFTYPE_MONITOR) {
4955 ar->monitor_arvif = arvif;
4956 ret = ath10k_monitor_recalc(ar);
4957 if (ret) {
4958 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4959 goto err_peer_delete;
4960 }
4961 }
4962
4963 spin_lock_bh(&ar->htt.tx_lock);
4964 if (!ar->tx_paused)
4965 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
4966 spin_unlock_bh(&ar->htt.tx_lock);
4967
4968 mutex_unlock(&ar->conf_mutex);
4969 return 0;
4970
4971 err_peer_delete:
4972 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4973 arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
4974 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
4975
4976 err_vdev_delete:
4977 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
4978 ar->free_vdev_map |= 1LL << arvif->vdev_id;
4979 list_del(&arvif->list);
4980
4981 err:
4982 if (arvif->beacon_buf) {
4983 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
4984 arvif->beacon_buf, arvif->beacon_paddr);
4985 arvif->beacon_buf = NULL;
4986 }
4987
4988 mutex_unlock(&ar->conf_mutex);
4989
4990 return ret;
4991 }
4992
4993 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
4994 {
4995 int i;
4996
4997 for (i = 0; i < BITS_PER_LONG; i++)
4998 ath10k_mac_vif_tx_unlock(arvif, i);
4999 }
5000
5001 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5002 struct ieee80211_vif *vif)
5003 {
5004 struct ath10k *ar = hw->priv;
5005 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5006 struct ath10k_peer *peer;
5007 int ret;
5008 int i;
5009
5010 cancel_work_sync(&arvif->ap_csa_work);
5011 cancel_delayed_work_sync(&arvif->connection_loss_work);
5012
5013 mutex_lock(&ar->conf_mutex);
5014
5015 spin_lock_bh(&ar->data_lock);
5016 ath10k_mac_vif_beacon_cleanup(arvif);
5017 spin_unlock_bh(&ar->data_lock);
5018
5019 ret = ath10k_spectral_vif_stop(arvif);
5020 if (ret)
5021 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5022 arvif->vdev_id, ret);
5023
5024 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5025 list_del(&arvif->list);
5026
5027 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5028 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5029 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5030 vif->addr);
5031 if (ret)
5032 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5033 arvif->vdev_id, ret);
5034
5035 kfree(arvif->u.ap.noa_data);
5036 }
5037
5038 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5039 arvif->vdev_id);
5040
5041 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5042 if (ret)
5043 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5044 arvif->vdev_id, ret);
5045
5046 /* Some firmware revisions don't notify host about self-peer removal
5047 * until after associated vdev is deleted.
5048 */
5049 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5050 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5051 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5052 vif->addr);
5053 if (ret)
5054 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5055 arvif->vdev_id, ret);
5056
5057 spin_lock_bh(&ar->data_lock);
5058 ar->num_peers--;
5059 spin_unlock_bh(&ar->data_lock);
5060 }
5061
5062 spin_lock_bh(&ar->data_lock);
5063 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5064 peer = ar->peer_map[i];
5065 if (!peer)
5066 continue;
5067
5068 if (peer->vif == vif) {
5069 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5070 vif->addr, arvif->vdev_id);
5071 peer->vif = NULL;
5072 }
5073 }
5074 spin_unlock_bh(&ar->data_lock);
5075
5076 ath10k_peer_cleanup(ar, arvif->vdev_id);
5077 ath10k_mac_txq_unref(ar, vif->txq);
5078
5079 if (vif->type == NL80211_IFTYPE_MONITOR) {
5080 ar->monitor_arvif = NULL;
5081 ret = ath10k_monitor_recalc(ar);
5082 if (ret)
5083 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5084 }
5085
5086 spin_lock_bh(&ar->htt.tx_lock);
5087 ath10k_mac_vif_tx_unlock_all(arvif);
5088 spin_unlock_bh(&ar->htt.tx_lock);
5091
5092 mutex_unlock(&ar->conf_mutex);
5093 }
5094
5095 /*
5096 * FIXME: Has to be verified.
5097 */
5098 #define SUPPORTED_FILTERS \
5099 (FIF_ALLMULTI | \
5100 FIF_CONTROL | \
5101 FIF_PSPOLL | \
5102 FIF_OTHER_BSS | \
5103 FIF_BCN_PRBRESP_PROMISC | \
5104 FIF_PROBE_REQ | \
5105 FIF_FCSFAIL)
5106
5107 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5108 unsigned int changed_flags,
5109 unsigned int *total_flags,
5110 u64 multicast)
5111 {
5112 struct ath10k *ar = hw->priv;
5113 int ret;
5114
5115 mutex_lock(&ar->conf_mutex);
5116
5117 changed_flags &= SUPPORTED_FILTERS;
5118 *total_flags &= SUPPORTED_FILTERS;
5119 ar->filter_flags = *total_flags;
5120
5121 ret = ath10k_monitor_recalc(ar);
5122 if (ret)
5123 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5124
5125 mutex_unlock(&ar->conf_mutex);
5126 }
5127
5128 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5129 struct ieee80211_vif *vif,
5130 struct ieee80211_bss_conf *info,
5131 u32 changed)
5132 {
5133 struct ath10k *ar = hw->priv;
5134 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5135 int ret = 0;
5136 u32 vdev_param, pdev_param, slottime, preamble;
5137
5138 mutex_lock(&ar->conf_mutex);
5139
5140 if (changed & BSS_CHANGED_IBSS)
5141 ath10k_control_ibss(arvif, info, vif->addr);
5142
5143 if (changed & BSS_CHANGED_BEACON_INT) {
5144 arvif->beacon_interval = info->beacon_int;
5145 vdev_param = ar->wmi.vdev_param->beacon_interval;
5146 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5147 arvif->beacon_interval);
5148 ath10k_dbg(ar, ATH10K_DBG_MAC,
5149 "mac vdev %d beacon_interval %d\n",
5150 arvif->vdev_id, arvif->beacon_interval);
5151
5152 if (ret)
5153 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5154 arvif->vdev_id, ret);
5155 }
5156
5157 if (changed & BSS_CHANGED_BEACON) {
5158 ath10k_dbg(ar, ATH10K_DBG_MAC,
5159 "vdev %d set beacon tx mode to staggered\n",
5160 arvif->vdev_id);
5161
5162 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5163 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5164 WMI_BEACON_STAGGERED_MODE);
5165 if (ret)
5166 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5167 arvif->vdev_id, ret);
5168
5169 ret = ath10k_mac_setup_bcn_tmpl(arvif);
5170 if (ret)
5171 ath10k_warn(ar, "failed to update beacon template: %d\n",
5172 ret);
5173
5174 if (ieee80211_vif_is_mesh(vif)) {
5175 /* mesh doesn't use SSID but firmware needs it */
5176 strncpy(arvif->u.ap.ssid, "mesh",
5177 sizeof(arvif->u.ap.ssid));
5178 arvif->u.ap.ssid_len = 4;
5179 }
5180 }
5181
5182 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5183 ret = ath10k_mac_setup_prb_tmpl(arvif);
5184 if (ret)
5185 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5186 arvif->vdev_id, ret);
5187 }
5188
5189 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5190 arvif->dtim_period = info->dtim_period;
5191
5192 ath10k_dbg(ar, ATH10K_DBG_MAC,
5193 "mac vdev %d dtim_period %d\n",
5194 arvif->vdev_id, arvif->dtim_period);
5195
5196 vdev_param = ar->wmi.vdev_param->dtim_period;
5197 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5198 arvif->dtim_period);
5199 if (ret)
5200 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5201 arvif->vdev_id, ret);
5202 }
5203
5204 if (changed & BSS_CHANGED_SSID &&
5205 vif->type == NL80211_IFTYPE_AP) {
5206 arvif->u.ap.ssid_len = info->ssid_len;
5207 if (info->ssid_len)
5208 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5209 arvif->u.ap.hidden_ssid = info->hidden_ssid;
5210 }
5211
5212 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5213 ether_addr_copy(arvif->bssid, info->bssid);
5214
5215 if (changed & BSS_CHANGED_BEACON_ENABLED)
5216 ath10k_control_beaconing(arvif, info);
5217
5218 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5219 arvif->use_cts_prot = info->use_cts_prot;
5220 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5221 arvif->vdev_id, info->use_cts_prot);
5222
5223 ret = ath10k_recalc_rtscts_prot(arvif);
5224 if (ret)
5225 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5226 arvif->vdev_id, ret);
5227
5228 vdev_param = ar->wmi.vdev_param->protection_mode;
5229 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5230 info->use_cts_prot ? 1 : 0);
5231 if (ret)
5232 ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5233 info->use_cts_prot, arvif->vdev_id, ret);
5234 }
5235
5236 if (changed & BSS_CHANGED_ERP_SLOT) {
5237 if (info->use_short_slot)
5238 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5239
5240 else
5241 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5242
5243 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5244 arvif->vdev_id, slottime);
5245
5246 vdev_param = ar->wmi.vdev_param->slot_time;
5247 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5248 slottime);
5249 if (ret)
5250 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5251 arvif->vdev_id, ret);
5252 }
5253
5254 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5255 if (info->use_short_preamble)
5256 preamble = WMI_VDEV_PREAMBLE_SHORT;
5257 else
5258 preamble = WMI_VDEV_PREAMBLE_LONG;
5259
5260 ath10k_dbg(ar, ATH10K_DBG_MAC,
5261 "mac vdev %d preamble %d\n",
5262 arvif->vdev_id, preamble);
5263
5264 vdev_param = ar->wmi.vdev_param->preamble;
5265 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5266 preamble);
5267 if (ret)
5268 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5269 arvif->vdev_id, ret);
5270 }
5271
5272 if (changed & BSS_CHANGED_ASSOC) {
5273 if (info->assoc) {
5274 /* Workaround: Make sure monitor vdev is not running
5275 * when associating to prevent some firmware revisions
5276 * (e.g. 10.1 and 10.2) from crashing.
5277 */
5278 if (ar->monitor_started)
5279 ath10k_monitor_stop(ar);
5280 ath10k_bss_assoc(hw, vif, info);
5281 ath10k_monitor_recalc(ar);
5282 } else {
5283 ath10k_bss_disassoc(hw, vif);
5284 }
5285 }
5286
5287 if (changed & BSS_CHANGED_TXPOWER) {
5288 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5289 arvif->vdev_id, info->txpower);
5290
5291 arvif->txpower = info->txpower;
5292 ret = ath10k_mac_txpower_recalc(ar);
5293 if (ret)
5294 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5295 }
5296
5297 if (changed & BSS_CHANGED_PS) {
5298 arvif->ps = vif->bss_conf.ps;
5299
5300 ret = ath10k_config_ps(ar);
5301 if (ret)
5302 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5303 arvif->vdev_id, ret);
5304 }
5305
5306 mutex_unlock(&ar->conf_mutex);
5307 }
5308
5309 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5310 struct ieee80211_vif *vif,
5311 struct ieee80211_scan_request *hw_req)
5312 {
5313 struct ath10k *ar = hw->priv;
5314 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5315 struct cfg80211_scan_request *req = &hw_req->req;
5316 struct wmi_start_scan_arg arg;
5317 int ret = 0;
5318 int i;
5319
5320 mutex_lock(&ar->conf_mutex);
5321
5322 spin_lock_bh(&ar->data_lock);
5323 switch (ar->scan.state) {
5324 case ATH10K_SCAN_IDLE:
5325 reinit_completion(&ar->scan.started);
5326 reinit_completion(&ar->scan.completed);
5327 ar->scan.state = ATH10K_SCAN_STARTING;
5328 ar->scan.is_roc = false;
5329 ar->scan.vdev_id = arvif->vdev_id;
5330 ret = 0;
5331 break;
5332 case ATH10K_SCAN_STARTING:
5333 case ATH10K_SCAN_RUNNING:
5334 case ATH10K_SCAN_ABORTING:
5335 ret = -EBUSY;
5336 break;
5337 }
5338 spin_unlock_bh(&ar->data_lock);
5339
5340 if (ret)
5341 goto exit;
5342
5343 memset(&arg, 0, sizeof(arg));
5344 ath10k_wmi_start_scan_init(ar, &arg);
5345 arg.vdev_id = arvif->vdev_id;
5346 arg.scan_id = ATH10K_SCAN_ID;
5347
5348 if (req->ie_len) {
5349 arg.ie_len = req->ie_len;
5350 memcpy(arg.ie, req->ie, arg.ie_len);
5351 }
5352
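/* With no SSIDs to probe for, fall back to a passive scan on the requested
 * channels.
 */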
5353 if (req->n_ssids) {
5354 arg.n_ssids = req->n_ssids;
5355 for (i = 0; i < arg.n_ssids; i++) {
5356 arg.ssids[i].len = req->ssids[i].ssid_len;
5357 arg.ssids[i].ssid = req->ssids[i].ssid;
5358 }
5359 } else {
5360 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5361 }
5362
5363 if (req->n_channels) {
5364 arg.n_channels = req->n_channels;
5365 for (i = 0; i < arg.n_channels; i++)
5366 arg.channels[i] = req->channels[i]->center_freq;
5367 }
5368
5369 ret = ath10k_start_scan(ar, &arg);
5370 if (ret) {
5371 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5372 spin_lock_bh(&ar->data_lock);
5373 ar->scan.state = ATH10K_SCAN_IDLE;
5374 spin_unlock_bh(&ar->data_lock);
5375 }
5376
5377 /* Add a 200ms margin to account for event/command processing */
5378 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5379 msecs_to_jiffies(arg.max_scan_time +
5380 200));
5381
5382 exit:
5383 mutex_unlock(&ar->conf_mutex);
5384 return ret;
5385 }
5386
5387 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5388 struct ieee80211_vif *vif)
5389 {
5390 struct ath10k *ar = hw->priv;
5391
5392 mutex_lock(&ar->conf_mutex);
5393 ath10k_scan_abort(ar);
5394 mutex_unlock(&ar->conf_mutex);
5395
5396 cancel_delayed_work_sync(&ar->scan.timeout);
5397 }
5398
5399 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5400 struct ath10k_vif *arvif,
5401 enum set_key_cmd cmd,
5402 struct ieee80211_key_conf *key)
5403 {
5404 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5405 int ret;
5406
5407 /* 10.1 firmware branch requires default key index to be set to group
5408 * key index after installing it. Otherwise FW/HW Txes corrupted
5409 * frames with multi-vif APs. This is not required for main firmware
5410 * branch (e.g. 636).
5411 *
5412 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5413 *
5414 * FIXME: It remains unknown if this is required for multi-vif STA
5415 * interfaces on 10.1.
5416 */
5417
5418 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5419 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5420 return;
5421
5422 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5423 return;
5424
5425 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5426 return;
5427
5428 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5429 return;
5430
5431 if (cmd != SET_KEY)
5432 return;
5433
5434 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5435 key->keyidx);
5436 if (ret)
5437 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5438 arvif->vdev_id, ret);
5439 }
5440
5441 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5442 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5443 struct ieee80211_key_conf *key)
5444 {
5445 struct ath10k *ar = hw->priv;
5446 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5447 struct ath10k_peer *peer;
5448 const u8 *peer_addr;
5449 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5450 key->cipher == WLAN_CIPHER_SUITE_WEP104;
5451 int ret = 0;
5452 int ret2;
5453 u32 flags = 0;
5454 u32 flags2;
5455
5456 /* this one needs to be done in software */
5457 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5458 return 1;
5459
5460 if (arvif->nohwcrypt)
5461 return 1;
5462
5463 if (key->keyidx > WMI_MAX_KEY_INDEX)
5464 return -ENOSPC;
5465
5466 mutex_lock(&ar->conf_mutex);
5467
5468 if (sta)
5469 peer_addr = sta->addr;
5470 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5471 peer_addr = vif->bss_conf.bssid;
5472 else
5473 peer_addr = vif->addr;
5474
5475 key->hw_key_idx = key->keyidx;
5476
5477 if (is_wep) {
5478 if (cmd == SET_KEY)
5479 arvif->wep_keys[key->keyidx] = key;
5480 else
5481 arvif->wep_keys[key->keyidx] = NULL;
5482 }
5483
5484 /* The peer should not disappear midway (unless FW goes awry) since
5485 * we already hold conf_mutex; we just make sure it's there now. */
5486 spin_lock_bh(&ar->data_lock);
5487 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5488 spin_unlock_bh(&ar->data_lock);
5489
5490 if (!peer) {
5491 if (cmd == SET_KEY) {
5492 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5493 peer_addr);
5494 ret = -EOPNOTSUPP;
5495 goto exit;
5496 } else {
5497 /* if the peer doesn't exist there is no key to disable
5498 * anymore */
5499 goto exit;
5500 }
5501 }
5502
5503 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5504 flags |= WMI_KEY_PAIRWISE;
5505 else
5506 flags |= WMI_KEY_GROUP;
5507
5508 if (is_wep) {
5509 if (cmd == DISABLE_KEY)
5510 ath10k_clear_vdev_key(arvif, key);
5511
5512 /* When WEP keys are uploaded it's possible that there are
5513 * stations associated already (e.g. when merging) without any
5514 * keys. Static WEP needs an explicit per-peer key upload.
5515 */
5516 if (vif->type == NL80211_IFTYPE_ADHOC &&
5517 cmd == SET_KEY)
5518 ath10k_mac_vif_update_wep_key(arvif, key);
5519
5520 /* 802.1x never sets the def_wep_key_idx so each set_key()
5521 * call changes default tx key.
5522 *
5523 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5524 * after first set_key().
5525 */
5526 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5527 flags |= WMI_KEY_TX_USAGE;
5528 }
5529
5530 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5531 if (ret) {
5532 WARN_ON(ret > 0);
5533 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5534 arvif->vdev_id, peer_addr, ret);
5535 goto exit;
5536 }
5537
5538 /* mac80211 sets static WEP keys as groupwise while firmware requires
5539 * them to be installed twice as both pairwise and groupwise.
5540 */
5541 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5542 flags2 = flags;
5543 flags2 &= ~WMI_KEY_GROUP;
5544 flags2 |= WMI_KEY_PAIRWISE;
5545
5546 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5547 if (ret) {
5548 WARN_ON(ret > 0);
5549 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5550 arvif->vdev_id, peer_addr, ret);
5551 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5552 peer_addr, flags);
5553 if (ret2) {
5554 WARN_ON(ret2 > 0);
5555 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5556 arvif->vdev_id, peer_addr, ret2);
5557 }
5558 goto exit;
5559 }
5560 }
5561
5562 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5563
5564 spin_lock_bh(&ar->data_lock);
5565 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5566 if (peer && cmd == SET_KEY)
5567 peer->keys[key->keyidx] = key;
5568 else if (peer && cmd == DISABLE_KEY)
5569 peer->keys[key->keyidx] = NULL;
5570 else if (peer == NULL)
5571 /* impossible unless FW goes crazy */
5572 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5573 spin_unlock_bh(&ar->data_lock);
5574
5575 exit:
5576 mutex_unlock(&ar->conf_mutex);
5577 return ret;
5578 }
5579
5580 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5581 struct ieee80211_vif *vif,
5582 int keyidx)
5583 {
5584 struct ath10k *ar = hw->priv;
5585 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5586 int ret;
5587
5588 mutex_lock(&arvif->ar->conf_mutex);
5589
5590 if (arvif->ar->state != ATH10K_STATE_ON)
5591 goto unlock;
5592
5593 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5594 arvif->vdev_id, keyidx);
5595
5596 ret = ath10k_wmi_vdev_set_param(arvif->ar,
5597 arvif->vdev_id,
5598 arvif->ar->wmi.vdev_param->def_keyid,
5599 keyidx);
5600
5601 if (ret) {
5602 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5603 arvif->vdev_id,
5604 ret);
5605 goto unlock;
5606 }
5607
5608 arvif->def_wep_key_idx = keyidx;
5609
5610 unlock:
5611 mutex_unlock(&arvif->ar->conf_mutex);
5612 }
5613
5614 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5615 {
5616 struct ath10k *ar;
5617 struct ath10k_vif *arvif;
5618 struct ath10k_sta *arsta;
5619 struct ieee80211_sta *sta;
5620 struct cfg80211_chan_def def;
5621 enum nl80211_band band;
5622 const u8 *ht_mcs_mask;
5623 const u16 *vht_mcs_mask;
5624 u32 changed, bw, nss, smps;
5625 int err;
5626
5627 arsta = container_of(wk, struct ath10k_sta, update_wk);
5628 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5629 arvif = arsta->arvif;
5630 ar = arvif->ar;
5631
5632 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5633 return;
5634
5635 band = def.chan->band;
5636 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5637 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5638
5639 spin_lock_bh(&ar->data_lock);
5640
5641 changed = arsta->changed;
5642 arsta->changed = 0;
5643
5644 bw = arsta->bw;
5645 nss = arsta->nss;
5646 smps = arsta->smps;
5647
5648 spin_unlock_bh(&ar->data_lock);
5649
5650 mutex_lock(&ar->conf_mutex);
5651
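/* Clamp the reported number of spatial streams to at least one and to no
 * more than the configured HT/VHT MCS masks allow.
 */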
5652 nss = max_t(u32, 1, nss);
5653 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5654 ath10k_mac_max_vht_nss(vht_mcs_mask)));
5655
5656 if (changed & IEEE80211_RC_BW_CHANGED) {
5657 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5658 sta->addr, bw);
5659
5660 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5661 WMI_PEER_CHAN_WIDTH, bw);
5662 if (err)
5663 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5664 sta->addr, bw, err);
5665 }
5666
5667 if (changed & IEEE80211_RC_NSS_CHANGED) {
5668 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5669 sta->addr, nss);
5670
5671 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5672 WMI_PEER_NSS, nss);
5673 if (err)
5674 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5675 sta->addr, nss, err);
5676 }
5677
5678 if (changed & IEEE80211_RC_SMPS_CHANGED) {
5679 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5680 sta->addr, smps);
5681
5682 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5683 WMI_PEER_SMPS_STATE, smps);
5684 if (err)
5685 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5686 sta->addr, smps, err);
5687 }
5688
5689 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5690 changed & IEEE80211_RC_NSS_CHANGED) {
5691 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5692 sta->addr);
5693
5694 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5695 if (err)
5696 ath10k_warn(ar, "failed to reassociate station: %pM\n",
5697 sta->addr);
5698 }
5699
5700 mutex_unlock(&ar->conf_mutex);
5701 }
5702
5703 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5704 struct ieee80211_sta *sta)
5705 {
5706 struct ath10k *ar = arvif->ar;
5707
5708 lockdep_assert_held(&ar->conf_mutex);
5709
5710 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5711 return 0;
5712
5713 if (ar->num_stations >= ar->max_num_stations)
5714 return -ENOBUFS;
5715
5716 ar->num_stations++;
5717
5718 return 0;
5719 }
5720
5721 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5722 struct ieee80211_sta *sta)
5723 {
5724 struct ath10k *ar = arvif->ar;
5725
5726 lockdep_assert_held(&ar->conf_mutex);
5727
5728 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5729 return;
5730
5731 ar->num_stations--;
5732 }
5733
5734 struct ath10k_mac_tdls_iter_data {
5735 u32 num_tdls_stations;
5736 struct ieee80211_vif *curr_vif;
5737 };
5738
5739 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5740 struct ieee80211_sta *sta)
5741 {
5742 struct ath10k_mac_tdls_iter_data *iter_data = data;
5743 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5744 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5745
5746 if (sta->tdls && sta_vif == iter_data->curr_vif)
5747 iter_data->num_tdls_stations++;
5748 }
5749
5750 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5751 struct ieee80211_vif *vif)
5752 {
5753 struct ath10k_mac_tdls_iter_data data = {};
5754
5755 data.curr_vif = vif;
5756
5757 ieee80211_iterate_stations_atomic(hw,
5758 ath10k_mac_tdls_vif_stations_count_iter,
5759 &data);
5760 return data.num_tdls_stations;
5761 }
5762
5763 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5764 struct ieee80211_vif *vif)
5765 {
5766 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5767 int *num_tdls_vifs = data;
5768
5769 if (vif->type != NL80211_IFTYPE_STATION)
5770 return;
5771
5772 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5773 (*num_tdls_vifs)++;
5774 }
5775
5776 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5777 {
5778 int num_tdls_vifs = 0;
5779
5780 ieee80211_iterate_active_interfaces_atomic(hw,
5781 IEEE80211_IFACE_ITER_NORMAL,
5782 ath10k_mac_tdls_vifs_count_iter,
5783 &num_tdls_vifs);
5784 return num_tdls_vifs;
5785 }
5786
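/* Handle mac80211 station state transitions (NOTEXIST <-> NONE <-> AUTH <->
 * ASSOC <-> AUTHORIZED). A firmware peer is created on NOTEXIST->NONE and
 * deleted on NONE->NOTEXIST; AP, mesh and IBSS interfaces (re)associate
 * stations on AUTH->ASSOC, while TDLS peers are brought up on
 * ASSOC->AUTHORIZED.
 */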
5787 static int ath10k_sta_state(struct ieee80211_hw *hw,
5788 struct ieee80211_vif *vif,
5789 struct ieee80211_sta *sta,
5790 enum ieee80211_sta_state old_state,
5791 enum ieee80211_sta_state new_state)
5792 {
5793 struct ath10k *ar = hw->priv;
5794 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5795 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5796 struct ath10k_peer *peer;
5797 int ret = 0;
5798 int i;
5799
5800 if (old_state == IEEE80211_STA_NOTEXIST &&
5801 new_state == IEEE80211_STA_NONE) {
5802 memset(arsta, 0, sizeof(*arsta));
5803 arsta->arvif = arvif;
5804 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5805
5806 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5807 ath10k_mac_txq_init(sta->txq[i]);
5808 }
5809
5810 /* cancel must be done outside the mutex to avoid deadlock */
5811 if ((old_state == IEEE80211_STA_NONE &&
5812 new_state == IEEE80211_STA_NOTEXIST))
5813 cancel_work_sync(&arsta->update_wk);
5814
5815 mutex_lock(&ar->conf_mutex);
5816
5817 if (old_state == IEEE80211_STA_NOTEXIST &&
5818 new_state == IEEE80211_STA_NONE) {
5819 /*
5820 * New station addition.
5821 */
5822 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5823 u32 num_tdls_stations;
5824 u32 num_tdls_vifs;
5825
5826 ath10k_dbg(ar, ATH10K_DBG_MAC,
5827 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5828 arvif->vdev_id, sta->addr,
5829 ar->num_stations + 1, ar->max_num_stations,
5830 ar->num_peers + 1, ar->max_num_peers);
5831
5832 ret = ath10k_mac_inc_num_stations(arvif, sta);
5833 if (ret) {
5834 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5835 ar->max_num_stations);
5836 goto exit;
5837 }
5838
5839 if (sta->tdls)
5840 peer_type = WMI_PEER_TYPE_TDLS;
5841
5842 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5843 sta->addr, peer_type);
5844 if (ret) {
5845 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5846 sta->addr, arvif->vdev_id, ret);
5847 ath10k_mac_dec_num_stations(arvif, sta);
5848 goto exit;
5849 }
5850
5851 spin_lock_bh(&ar->data_lock);
5852
5853 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5854 if (!peer) {
5855 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5856 sta->addr, arvif->vdev_id);
5857 spin_unlock_bh(&ar->data_lock);
5858 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5859 ath10k_mac_dec_num_stations(arvif, sta);
5860 ret = -ENOENT;
5861 goto exit;
5862 }
5863
5864 arsta->peer_id = find_first_bit(peer->peer_ids,
5865 ATH10K_MAX_NUM_PEER_IDS);
5866
5867 spin_unlock_bh(&ar->data_lock);
5868
5869 if (!sta->tdls)
5870 goto exit;
5871
5872 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5873 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5874
5875 if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
5876 num_tdls_stations == 0) {
5877 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
5878 arvif->vdev_id, ar->max_num_tdls_vdevs);
5879 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5880 ath10k_mac_dec_num_stations(arvif, sta);
5881 ret = -ENOBUFS;
5882 goto exit;
5883 }
5884
5885 if (num_tdls_stations == 0) {
5886 /* This is the first tdls peer in current vif */
5887 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
5888
5889 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5890 state);
5891 if (ret) {
5892 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5893 arvif->vdev_id, ret);
5894 ath10k_peer_delete(ar, arvif->vdev_id,
5895 sta->addr);
5896 ath10k_mac_dec_num_stations(arvif, sta);
5897 goto exit;
5898 }
5899 }
5900
5901 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5902 WMI_TDLS_PEER_STATE_PEERING);
5903 if (ret) {
5904 ath10k_warn(ar,
5905 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
5906 sta->addr, arvif->vdev_id, ret);
5907 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5908 ath10k_mac_dec_num_stations(arvif, sta);
5909
5910 if (num_tdls_stations != 0)
5911 goto exit;
5912 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5913 WMI_TDLS_DISABLE);
5914 }
5915 } else if ((old_state == IEEE80211_STA_NONE &&
5916 new_state == IEEE80211_STA_NOTEXIST)) {
5917 /*
5918 * Existing station deletion.
5919 */
5920 ath10k_dbg(ar, ATH10K_DBG_MAC,
5921 "mac vdev %d peer delete %pM (sta gone)\n",
5922 arvif->vdev_id, sta->addr);
5923
5924 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5925 if (ret)
5926 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
5927 sta->addr, arvif->vdev_id, ret);
5928
5929 ath10k_mac_dec_num_stations(arvif, sta);
5930
5931 spin_lock_bh(&ar->data_lock);
5932 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5933 peer = ar->peer_map[i];
5934 if (!peer)
5935 continue;
5936
5937 if (peer->sta == sta) {
5938 ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
5939 sta->addr, arvif->vdev_id);
5940 peer->sta = NULL;
5941 }
5942 }
5943 spin_unlock_bh(&ar->data_lock);
5944
5945 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5946 ath10k_mac_txq_unref(ar, sta->txq[i]);
5947
5948 if (!sta->tdls)
5949 goto exit;
5950
5951 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
5952 goto exit;
5953
5954 /* This was the last tdls peer in current vif */
5955 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5956 WMI_TDLS_DISABLE);
5957 if (ret) {
5958 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5959 arvif->vdev_id, ret);
5960 }
5961 } else if (old_state == IEEE80211_STA_AUTH &&
5962 new_state == IEEE80211_STA_ASSOC &&
5963 (vif->type == NL80211_IFTYPE_AP ||
5964 vif->type == NL80211_IFTYPE_MESH_POINT ||
5965 vif->type == NL80211_IFTYPE_ADHOC)) {
5966 /*
5967 * New association.
5968 */
5969 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
5970 sta->addr);
5971
5972 ret = ath10k_station_assoc(ar, vif, sta, false);
5973 if (ret)
5974 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
5975 sta->addr, arvif->vdev_id, ret);
5976 } else if (old_state == IEEE80211_STA_ASSOC &&
5977 new_state == IEEE80211_STA_AUTHORIZED &&
5978 sta->tdls) {
5979 /*
5980 * Tdls station authorized.
5981 */
5982 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
5983 sta->addr);
5984
5985 ret = ath10k_station_assoc(ar, vif, sta, false);
5986 if (ret) {
5987 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
5988 sta->addr, arvif->vdev_id, ret);
5989 goto exit;
5990 }
5991
5992 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5993 WMI_TDLS_PEER_STATE_CONNECTED);
5994 if (ret)
5995 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
5996 sta->addr, arvif->vdev_id, ret);
5997 } else if (old_state == IEEE80211_STA_ASSOC &&
5998 new_state == IEEE80211_STA_AUTH &&
5999 (vif->type == NL80211_IFTYPE_AP ||
6000 vif->type == NL80211_IFTYPE_MESH_POINT ||
6001 vif->type == NL80211_IFTYPE_ADHOC)) {
6002 /*
6003 * Disassociation.
6004 */
6005 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6006 sta->addr);
6007
6008 ret = ath10k_station_disassoc(ar, vif, sta);
6009 if (ret)
6010 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6011 sta->addr, arvif->vdev_id, ret);
6012 }
6013 exit:
6014 mutex_unlock(&ar->conf_mutex);
6015 return ret;
6016 }
6017
6018 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6019 u16 ac, bool enable)
6020 {
6021 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6022 struct wmi_sta_uapsd_auto_trig_arg arg = {};
6023 u32 prio = 0, acc = 0;
6024 u32 value = 0;
6025 int ret = 0;
6026
6027 lockdep_assert_held(&ar->conf_mutex);
6028
6029 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6030 return 0;
6031
6032 switch (ac) {
6033 case IEEE80211_AC_VO:
6034 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6035 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6036 prio = 7;
6037 acc = 3;
6038 break;
6039 case IEEE80211_AC_VI:
6040 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6041 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6042 prio = 5;
6043 acc = 2;
6044 break;
6045 case IEEE80211_AC_BE:
6046 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6047 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6048 prio = 2;
6049 acc = 1;
6050 break;
6051 case IEEE80211_AC_BK:
6052 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6053 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6054 prio = 0;
6055 acc = 0;
6056 break;
6057 }
6058
6059 if (enable)
6060 arvif->u.sta.uapsd |= value;
6061 else
6062 arvif->u.sta.uapsd &= ~value;
6063
6064 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6065 WMI_STA_PS_PARAM_UAPSD,
6066 arvif->u.sta.uapsd);
6067 if (ret) {
6068 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6069 goto exit;
6070 }
6071
6072 if (arvif->u.sta.uapsd)
6073 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6074 else
6075 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6076
6077 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6078 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6079 value);
6080 if (ret)
6081 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6082
6083 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6084 if (ret) {
6085 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6086 arvif->vdev_id, ret);
6087 return ret;
6088 }
6089
6090 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6091 if (ret) {
6092 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6093 arvif->vdev_id, ret);
6094 return ret;
6095 }
6096
6097 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6098 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6099 /* Only userspace can make an educated decision on when to send a
6100 * trigger frame. The following effectively disables u-UAPSD
6101 * autotrigger in firmware (which is enabled by default
6102 * provided the autotrigger service is available).
6103 */
6104
6105 arg.wmm_ac = acc;
6106 arg.user_priority = prio;
6107 arg.service_interval = 0;
6108 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6109 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6110
6111 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6112 arvif->bssid, &arg, 1);
6113 if (ret) {
6114 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6115 ret);
6116 return ret;
6117 }
6118 }
6119
6120 exit:
6121 return ret;
6122 }
6123
6124 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6125 struct ieee80211_vif *vif, u16 ac,
6126 const struct ieee80211_tx_queue_params *params)
6127 {
6128 struct ath10k *ar = hw->priv;
6129 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6130 struct wmi_wmm_params_arg *p = NULL;
6131 int ret;
6132
6133 mutex_lock(&ar->conf_mutex);
6134
6135 switch (ac) {
6136 case IEEE80211_AC_VO:
6137 p = &arvif->wmm_params.ac_vo;
6138 break;
6139 case IEEE80211_AC_VI:
6140 p = &arvif->wmm_params.ac_vi;
6141 break;
6142 case IEEE80211_AC_BE:
6143 p = &arvif->wmm_params.ac_be;
6144 break;
6145 case IEEE80211_AC_BK:
6146 p = &arvif->wmm_params.ac_bk;
6147 break;
6148 }
6149
6150 if (WARN_ON(!p)) {
6151 ret = -EINVAL;
6152 goto exit;
6153 }
6154
6155 p->cwmin = params->cw_min;
6156 p->cwmax = params->cw_max;
6157 p->aifs = params->aifs;
6158
6159 /*
6160 * The channel time duration programmed in the HW is in absolute
6161 * microseconds, while mac80211 gives the txop in units of
6162 * 32 microseconds.
6163 */
6164 p->txop = params->txop * 32;
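/* Worked example (illustrative): with the common 802.11 AC_VO default TXOP
 * limit of 47 units, the value programmed into the firmware is
 * 47 * 32 = 1504 microseconds.
 */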
6165
6166 if (ar->wmi.ops->gen_vdev_wmm_conf) {
6167 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6168 &arvif->wmm_params);
6169 if (ret) {
6170 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6171 arvif->vdev_id, ret);
6172 goto exit;
6173 }
6174 } else {
6175 /* This won't work well with multi-interface cases but it's
6176 * better than nothing.
6177 */
6178 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6179 if (ret) {
6180 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6181 goto exit;
6182 }
6183 }
6184
6185 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6186 if (ret)
6187 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6188
6189 exit:
6190 mutex_unlock(&ar->conf_mutex);
6191 return ret;
6192 }
6193
6194 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6195
6196 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6197 struct ieee80211_vif *vif,
6198 struct ieee80211_channel *chan,
6199 int duration,
6200 enum ieee80211_roc_type type)
6201 {
6202 struct ath10k *ar = hw->priv;
6203 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6204 struct wmi_start_scan_arg arg;
6205 int ret = 0;
6206 u32 scan_time_msec;
6207
6208 mutex_lock(&ar->conf_mutex);
6209
6210 spin_lock_bh(&ar->data_lock);
6211 switch (ar->scan.state) {
6212 case ATH10K_SCAN_IDLE:
6213 reinit_completion(&ar->scan.started);
6214 reinit_completion(&ar->scan.completed);
6215 reinit_completion(&ar->scan.on_channel);
6216 ar->scan.state = ATH10K_SCAN_STARTING;
6217 ar->scan.is_roc = true;
6218 ar->scan.vdev_id = arvif->vdev_id;
6219 ar->scan.roc_freq = chan->center_freq;
6220 ar->scan.roc_notify = true;
6221 ret = 0;
6222 break;
6223 case ATH10K_SCAN_STARTING:
6224 case ATH10K_SCAN_RUNNING:
6225 case ATH10K_SCAN_ABORTING:
6226 ret = -EBUSY;
6227 break;
6228 }
6229 spin_unlock_bh(&ar->data_lock);
6230
6231 if (ret)
6232 goto exit;
6233
6234 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6235
6236 memset(&arg, 0, sizeof(arg));
6237 ath10k_wmi_start_scan_init(ar, &arg);
6238 arg.vdev_id = arvif->vdev_id;
6239 arg.scan_id = ATH10K_SCAN_ID;
6240 arg.n_channels = 1;
6241 arg.channels[0] = chan->center_freq;
6242 arg.dwell_time_active = scan_time_msec;
6243 arg.dwell_time_passive = scan_time_msec;
6244 arg.max_scan_time = scan_time_msec;
6245 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6246 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6247 arg.burst_duration_ms = duration;
6248
6249 ret = ath10k_start_scan(ar, &arg);
6250 if (ret) {
6251 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6252 spin_lock_bh(&ar->data_lock);
6253 ar->scan.state = ATH10K_SCAN_IDLE;
6254 spin_unlock_bh(&ar->data_lock);
6255 goto exit;
6256 }
6257
6258 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6259 if (ret == 0) {
6260 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6261
6262 ret = ath10k_scan_stop(ar);
6263 if (ret)
6264 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6265
6266 ret = -ETIMEDOUT;
6267 goto exit;
6268 }
6269
6270 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6271 msecs_to_jiffies(duration));
6272
6273 ret = 0;
6274 exit:
6275 mutex_unlock(&ar->conf_mutex);
6276 return ret;
6277 }
6278
6279 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6280 {
6281 struct ath10k *ar = hw->priv;
6282
6283 mutex_lock(&ar->conf_mutex);
6284
6285 spin_lock_bh(&ar->data_lock);
6286 ar->scan.roc_notify = false;
6287 spin_unlock_bh(&ar->data_lock);
6288
6289 ath10k_scan_abort(ar);
6290
6291 mutex_unlock(&ar->conf_mutex);
6292
6293 cancel_delayed_work_sync(&ar->scan.timeout);
6294
6295 return 0;
6296 }
6297
6298 /*
6299 * Both RTS and Fragmentation threshold are interface-specific
6300 * in ath10k, but device-specific in mac80211.
6301 */
6302
6303 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6304 {
6305 struct ath10k *ar = hw->priv;
6306 struct ath10k_vif *arvif;
6307 int ret = 0;
6308
6309 mutex_lock(&ar->conf_mutex);
6310 list_for_each_entry(arvif, &ar->arvifs, list) {
6311 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6312 arvif->vdev_id, value);
6313
6314 ret = ath10k_mac_set_rts(arvif, value);
6315 if (ret) {
6316 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6317 arvif->vdev_id, ret);
6318 break;
6319 }
6320 }
6321 mutex_unlock(&ar->conf_mutex);
6322
6323 return ret;
6324 }
6325
6326 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6327 {
6328 /* Even though there's a WMI enum for fragmentation threshold no known
6329 * firmware actually implements it. Moreover it is not possible to delegate
6330 * frame fragmentation to mac80211 because firmware clears the "more
6331 * fragments" bit in frame control making it impossible for remote
6332 * devices to reassemble frames.
6333 *
6334 * Hence implement a dummy callback just to say fragmentation isn't
6335 * supported. This effectively prevents mac80211 from doing frame
6336 * fragmentation in software.
6337 */
6338 return -EOPNOTSUPP;
6339 }
6340
6341 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6342 u32 queues, bool drop)
6343 {
6344 struct ath10k *ar = hw->priv;
6345 bool skip;
6346 long time_left;
6347
6348 /* mac80211 doesn't care if we really xmit queued frames or not;
6349 * we'll collect those frames either way if we stop/delete vdevs. */
6350 if (drop)
6351 return;
6352
6353 mutex_lock(&ar->conf_mutex);
6354
6355 if (ar->state == ATH10K_STATE_WEDGED)
6356 goto skip;
6357
6358 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6359 bool empty;
6360
6361 spin_lock_bh(&ar->htt.tx_lock);
6362 empty = (ar->htt.num_pending_tx == 0);
6363 spin_unlock_bh(&ar->htt.tx_lock);
6364
6365 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6366 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6367 &ar->dev_flags);
6368
6369 (empty || skip);
6370 }), ATH10K_FLUSH_TIMEOUT_HZ);
6371
6372 if (time_left == 0 || skip)
6373 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6374 skip, ar->state, time_left);
6375
6376 skip:
6377 mutex_unlock(&ar->conf_mutex);
6378 }
6379
6380 /* TODO: Implement this function properly
6381 * For now it is needed to reply to Probe Requests in IBSS mode.
6382 * Propably we need this information from FW.
6383 */
6384 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6385 {
6386 return 1;
6387 }
6388
6389 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6390 enum ieee80211_reconfig_type reconfig_type)
6391 {
6392 struct ath10k *ar = hw->priv;
6393
6394 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6395 return;
6396
6397 mutex_lock(&ar->conf_mutex);
6398
6399 /* If device failed to restart it will be in a different state, e.g.
6400 * ATH10K_STATE_WEDGED */
6401 if (ar->state == ATH10K_STATE_RESTARTED) {
6402 ath10k_info(ar, "device successfully recovered\n");
6403 ar->state = ATH10K_STATE_ON;
6404 ieee80211_wake_queues(ar->hw);
6405 }
6406
6407 mutex_unlock(&ar->conf_mutex);
6408 }
6409
6410 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6411 struct survey_info *survey)
6412 {
6413 struct ath10k *ar = hw->priv;
6414 struct ieee80211_supported_band *sband;
6415 struct survey_info *ar_survey = &ar->survey[idx];
6416 int ret = 0;
6417
6418 mutex_lock(&ar->conf_mutex);
6419
6420 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6421 if (sband && idx >= sband->n_channels) {
6422 idx -= sband->n_channels;
6423 sband = NULL;
6424 }
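	/* With both bands registered, idx therefore walks the 2 GHz channels
	 * first and then continues into the 5 GHz band: given the channel
	 * tables below (14 channels at 2 GHz, 25 at 5 GHz), idx 0-13 map to
	 * 2 GHz and idx 14-38 map to 5 GHz, e.g. idx 14 resolves to
	 * channel 36 (5180 MHz).
	 */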
6425
6426 if (!sband)
6427 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6428
6429 if (!sband || idx >= sband->n_channels) {
6430 ret = -ENOENT;
6431 goto exit;
6432 }
6433
6434 spin_lock_bh(&ar->data_lock);
6435 memcpy(survey, ar_survey, sizeof(*survey));
6436 spin_unlock_bh(&ar->data_lock);
6437
6438 survey->channel = &sband->channels[idx];
6439
6440 if (ar->rx_channel == survey->channel)
6441 survey->filled |= SURVEY_INFO_IN_USE;
6442
6443 exit:
6444 mutex_unlock(&ar->conf_mutex);
6445 return ret;
6446 }
6447
6448 static bool
6449 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6450 enum nl80211_band band,
6451 const struct cfg80211_bitrate_mask *mask)
6452 {
6453 int num_rates = 0;
6454 int i;
6455
6456 num_rates += hweight32(mask->control[band].legacy);
6457
6458 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6459 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6460
6461 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6462 num_rates += hweight16(mask->control[band].vht_mcs[i]);
6463
6464 return num_rates == 1;
6465 }
6466
6467 static bool
6468 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6469 enum nl80211_band band,
6470 const struct cfg80211_bitrate_mask *mask,
6471 int *nss)
6472 {
6473 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6474 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6475 u8 ht_nss_mask = 0;
6476 u8 vht_nss_mask = 0;
6477 int i;
6478
6479 if (mask->control[band].legacy)
6480 return false;
6481
6482 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6483 if (mask->control[band].ht_mcs[i] == 0)
6484 continue;
6485 else if (mask->control[band].ht_mcs[i] ==
6486 sband->ht_cap.mcs.rx_mask[i])
6487 ht_nss_mask |= BIT(i);
6488 else
6489 return false;
6490 }
6491
6492 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6493 if (mask->control[band].vht_mcs[i] == 0)
6494 continue;
6495 else if (mask->control[band].vht_mcs[i] ==
6496 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6497 vht_nss_mask |= BIT(i);
6498 else
6499 return false;
6500 }
6501
6502 if (ht_nss_mask != vht_nss_mask)
6503 return false;
6504
6505 if (ht_nss_mask == 0)
6506 return false;
6507
6508 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6509 return false;
6510
6511 *nss = fls(ht_nss_mask);
6512
6513 return true;
6514 }
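/* Example of the single-NSS check above: HT and VHT MCS masks fully set for
 * the first two spatial streams only give ht_nss_mask == vht_nss_mask == 0x3;
 * BIT(fls(0x3)) - 1 == 0x3, so *nss becomes 2. A non-contiguous mask such as
 * 0x5 fails the BIT(fls()) - 1 test and is rejected.
 */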
6515
6516 static int
6517 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6518 enum nl80211_band band,
6519 const struct cfg80211_bitrate_mask *mask,
6520 u8 *rate, u8 *nss)
6521 {
6522 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6523 int rate_idx;
6524 int i;
6525 u16 bitrate;
6526 u8 preamble;
6527 u8 hw_rate;
6528
6529 if (hweight32(mask->control[band].legacy) == 1) {
6530 rate_idx = ffs(mask->control[band].legacy) - 1;
6531
6532 hw_rate = sband->bitrates[rate_idx].hw_value;
6533 bitrate = sband->bitrates[rate_idx].bitrate;
6534
6535 if (ath10k_mac_bitrate_is_cck(bitrate))
6536 preamble = WMI_RATE_PREAMBLE_CCK;
6537 else
6538 preamble = WMI_RATE_PREAMBLE_OFDM;
6539
6540 *nss = 1;
6541 *rate = preamble << 6 |
6542 (*nss - 1) << 4 |
6543 hw_rate << 0;
6544
6545 return 0;
6546 }
6547
6548 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6549 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6550 *nss = i + 1;
6551 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6552 (*nss - 1) << 4 |
6553 (ffs(mask->control[band].ht_mcs[i]) - 1);
6554
6555 return 0;
6556 }
6557 }
6558
6559 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6560 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6561 *nss = i + 1;
6562 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6563 (*nss - 1) << 4 |
6564 (ffs(mask->control[band].vht_mcs[i]) - 1);
6565
6566 return 0;
6567 }
6568 }
6569
6570 return -EINVAL;
6571 }
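/* The fixed-rate word assembled above follows the WMI rate-code layout
 * preamble << 6 | (nss - 1) << 4 | rate. For instance, assuming
 * WMI_RATE_PREAMBLE_HT == 2, a single-stream HT MCS 7 mask encodes as
 * (2 << 6) | (0 << 4) | 7 == 0x87.
 */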
6572
6573 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6574 u8 rate, u8 nss, u8 sgi, u8 ldpc)
6575 {
6576 struct ath10k *ar = arvif->ar;
6577 u32 vdev_param;
6578 int ret;
6579
6580 lockdep_assert_held(&ar->conf_mutex);
6581
6582 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6583 arvif->vdev_id, rate, nss, sgi);
6584
6585 vdev_param = ar->wmi.vdev_param->fixed_rate;
6586 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6587 if (ret) {
6588 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6589 rate, ret);
6590 return ret;
6591 }
6592
6593 vdev_param = ar->wmi.vdev_param->nss;
6594 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6595 if (ret) {
6596 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6597 return ret;
6598 }
6599
6600 vdev_param = ar->wmi.vdev_param->sgi;
6601 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6602 if (ret) {
6603 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6604 return ret;
6605 }
6606
6607 vdev_param = ar->wmi.vdev_param->ldpc;
6608 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6609 if (ret) {
6610 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6611 return ret;
6612 }
6613
6614 return 0;
6615 }
6616
6617 static bool
6618 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6619 enum nl80211_band band,
6620 const struct cfg80211_bitrate_mask *mask)
6621 {
6622 int i;
6623 u16 vht_mcs;
6624
6625 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6626 * to express all VHT MCS rate masks. Effectively only the following
6627 * ranges can be used: none, 0-7, 0-8 and 0-9.
6628 */
6629 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6630 vht_mcs = mask->control[band].vht_mcs[i];
6631
6632 switch (vht_mcs) {
6633 case 0:
6634 case BIT(8) - 1:
6635 case BIT(9) - 1:
6636 case BIT(10) - 1:
6637 break;
6638 default:
6639 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6640 return false;
6641 }
6642 }
6643
6644 return true;
6645 }
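/* The accepted masks above are exactly the contiguous ranges the firmware
 * can express: 0 (none), BIT(8) - 1 == 0x00ff (MCS 0-7), BIT(9) - 1 ==
 * 0x01ff (MCS 0-8) and BIT(10) - 1 == 0x03ff (MCS 0-9).
 */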
6646
6647 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6648 struct ieee80211_sta *sta)
6649 {
6650 struct ath10k_vif *arvif = data;
6651 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6652 struct ath10k *ar = arvif->ar;
6653
6654 if (arsta->arvif != arvif)
6655 return;
6656
6657 spin_lock_bh(&ar->data_lock);
6658 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6659 spin_unlock_bh(&ar->data_lock);
6660
6661 ieee80211_queue_work(ar->hw, &arsta->update_wk);
6662 }
6663
6664 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6665 struct ieee80211_vif *vif,
6666 const struct cfg80211_bitrate_mask *mask)
6667 {
6668 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6669 struct cfg80211_chan_def def;
6670 struct ath10k *ar = arvif->ar;
6671 enum nl80211_band band;
6672 const u8 *ht_mcs_mask;
6673 const u16 *vht_mcs_mask;
6674 u8 rate;
6675 u8 nss;
6676 u8 sgi;
6677 u8 ldpc;
6678 int single_nss;
6679 int ret;
6680
6681 if (ath10k_mac_vif_chan(vif, &def))
6682 return -EPERM;
6683
6684 band = def.chan->band;
6685 ht_mcs_mask = mask->control[band].ht_mcs;
6686 vht_mcs_mask = mask->control[band].vht_mcs;
6687 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6688
6689 sgi = mask->control[band].gi;
6690 if (sgi == NL80211_TXRATE_FORCE_LGI)
6691 return -EINVAL;
6692
6693 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6694 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6695 &rate, &nss);
6696 if (ret) {
6697 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6698 arvif->vdev_id, ret);
6699 return ret;
6700 }
6701 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6702 &single_nss)) {
6703 rate = WMI_FIXED_RATE_NONE;
6704 nss = single_nss;
6705 } else {
6706 rate = WMI_FIXED_RATE_NONE;
6707 nss = min(ar->num_rf_chains,
6708 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6709 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6710
6711 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6712 return -EINVAL;
6713
6714 mutex_lock(&ar->conf_mutex);
6715
6716 arvif->bitrate_mask = *mask;
6717 ieee80211_iterate_stations_atomic(ar->hw,
6718 ath10k_mac_set_bitrate_mask_iter,
6719 arvif);
6720
6721 mutex_unlock(&ar->conf_mutex);
6722 }
6723
6724 mutex_lock(&ar->conf_mutex);
6725
6726 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6727 if (ret) {
6728 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6729 arvif->vdev_id, ret);
6730 goto exit;
6731 }
6732
6733 exit:
6734 mutex_unlock(&ar->conf_mutex);
6735
6736 return ret;
6737 }
6738
6739 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6740 struct ieee80211_vif *vif,
6741 struct ieee80211_sta *sta,
6742 u32 changed)
6743 {
6744 struct ath10k *ar = hw->priv;
6745 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6746 u32 bw, smps;
6747
6748 spin_lock_bh(&ar->data_lock);
6749
6750 ath10k_dbg(ar, ATH10K_DBG_MAC,
6751 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6752 sta->addr, changed, sta->bandwidth, sta->rx_nss,
6753 sta->smps_mode);
6754
6755 if (changed & IEEE80211_RC_BW_CHANGED) {
6756 bw = WMI_PEER_CHWIDTH_20MHZ;
6757
6758 switch (sta->bandwidth) {
6759 case IEEE80211_STA_RX_BW_20:
6760 bw = WMI_PEER_CHWIDTH_20MHZ;
6761 break;
6762 case IEEE80211_STA_RX_BW_40:
6763 bw = WMI_PEER_CHWIDTH_40MHZ;
6764 break;
6765 case IEEE80211_STA_RX_BW_80:
6766 bw = WMI_PEER_CHWIDTH_80MHZ;
6767 break;
6768 case IEEE80211_STA_RX_BW_160:
6769 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6770 sta->bandwidth, sta->addr);
6771 bw = WMI_PEER_CHWIDTH_20MHZ;
6772 break;
6773 }
6774
6775 arsta->bw = bw;
6776 }
6777
6778 if (changed & IEEE80211_RC_NSS_CHANGED)
6779 arsta->nss = sta->rx_nss;
6780
6781 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6782 smps = WMI_PEER_SMPS_PS_NONE;
6783
6784 switch (sta->smps_mode) {
6785 case IEEE80211_SMPS_AUTOMATIC:
6786 case IEEE80211_SMPS_OFF:
6787 smps = WMI_PEER_SMPS_PS_NONE;
6788 break;
6789 case IEEE80211_SMPS_STATIC:
6790 smps = WMI_PEER_SMPS_STATIC;
6791 break;
6792 case IEEE80211_SMPS_DYNAMIC:
6793 smps = WMI_PEER_SMPS_DYNAMIC;
6794 break;
6795 case IEEE80211_SMPS_NUM_MODES:
6796 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6797 sta->smps_mode, sta->addr);
6798 smps = WMI_PEER_SMPS_PS_NONE;
6799 break;
6800 }
6801
6802 arsta->smps = smps;
6803 }
6804
6805 arsta->changed |= changed;
6806
6807 spin_unlock_bh(&ar->data_lock);
6808
6809 ieee80211_queue_work(hw, &arsta->update_wk);
6810 }
6811
6812 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6813 {
6814 /*
6815 * FIXME: Return 0 for time being. Need to figure out whether FW
6816 * has the API to fetch 64-bit local TSF
6817 */
6818
6819 return 0;
6820 }
6821
6822 static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6823 u64 tsf)
6824 {
6825 struct ath10k *ar = hw->priv;
6826 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6827 u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
6828 int ret;
6829
6830 /* Workaround:
6831 *
6832 * The given tsf argument is an absolute TSF value, but the firmware
6833 * accepts only an offset relative to the current TSF.
6834 *
6835 * get_tsf() is normally used to compute that offset, but since
6836 * ath10k_get_tsf() is not implemented properly it always returns 0.
6837 * Luckily all callers of set_tsf, as of now, also derive the requested TSF
6838 * from get_tsf(), e.g. tsf = get_tsf() + tsf_delta, so the offset sent to
6839 * firmware (tsf - get_tsf() == tsf_delta) remains arithmetically correct.
6840 */
6841 tsf_offset = tsf - ath10k_get_tsf(hw, vif);
6842 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6843 vdev_param, tsf_offset);
6844 if (ret && ret != -EOPNOTSUPP)
6845 ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
6846 }
6847
6848 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
6849 struct ieee80211_vif *vif,
6850 struct ieee80211_ampdu_params *params)
6851 {
6852 struct ath10k *ar = hw->priv;
6853 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6854 struct ieee80211_sta *sta = params->sta;
6855 enum ieee80211_ampdu_mlme_action action = params->action;
6856 u16 tid = params->tid;
6857
6858 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
6859 arvif->vdev_id, sta->addr, tid, action);
6860
6861 switch (action) {
6862 case IEEE80211_AMPDU_RX_START:
6863 case IEEE80211_AMPDU_RX_STOP:
6864 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
6865 * creation/removal. Do we need to verify this?
6866 */
6867 return 0;
6868 case IEEE80211_AMPDU_TX_START:
6869 case IEEE80211_AMPDU_TX_STOP_CONT:
6870 case IEEE80211_AMPDU_TX_STOP_FLUSH:
6871 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6872 case IEEE80211_AMPDU_TX_OPERATIONAL:
6873 /* Firmware offloads Tx aggregation entirely so deny mac80211
6874 * Tx aggregation requests.
6875 */
6876 return -EOPNOTSUPP;
6877 }
6878
6879 return -EINVAL;
6880 }
6881
6882 static void
6883 ath10k_mac_update_rx_channel(struct ath10k *ar,
6884 struct ieee80211_chanctx_conf *ctx,
6885 struct ieee80211_vif_chanctx_switch *vifs,
6886 int n_vifs)
6887 {
6888 struct cfg80211_chan_def *def = NULL;
6889
6890 /* Both locks are required because ar->rx_channel is modified. This
6891 * allows readers to hold either lock.
6892 */
6893 lockdep_assert_held(&ar->conf_mutex);
6894 lockdep_assert_held(&ar->data_lock);
6895
6896 WARN_ON(ctx && vifs);
6897 WARN_ON(vifs && n_vifs != 1);
6898
6899 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
6900 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
6901 * ppdu on Rx may reduce performance on low-end systems. It should be
6902 * possible to build tables/hashmaps to speed the lookup up (be wary of
6903 * cpu data cache line sizes though), but to keep the initial
6904 * implementation simple and less intrusive, fall back to the slow lookup
6905 * only for multi-channel cases. Single-channel cases will keep using
6906 * the old channel derivation and thus performance should not be
6907 * affected much.
6908 */
6909 rcu_read_lock();
6910 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
6911 ieee80211_iter_chan_contexts_atomic(ar->hw,
6912 ath10k_mac_get_any_chandef_iter,
6913 &def);
6914
6915 if (vifs)
6916 def = &vifs[0].new_ctx->def;
6917
6918 ar->rx_channel = def->chan;
6919 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
6920 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
6921 /* During driver restart due to firmware assert, since mac80211
6922 * already has valid channel context for given radio, channel
6923 * context iteration returns num_chanctx > 0. So fix up rx_channel
6924 * while the restart is in progress.
6925 */
6926 ar->rx_channel = ctx->def.chan;
6927 } else {
6928 ar->rx_channel = NULL;
6929 }
6930 rcu_read_unlock();
6931 }
6932
6933 static void
6934 ath10k_mac_update_vif_chan(struct ath10k *ar,
6935 struct ieee80211_vif_chanctx_switch *vifs,
6936 int n_vifs)
6937 {
6938 struct ath10k_vif *arvif;
6939 int ret;
6940 int i;
6941
6942 lockdep_assert_held(&ar->conf_mutex);
6943
6944 /* First stop monitor interface. Some FW versions crash if there's a
6945 * lone monitor interface.
6946 */
6947 if (ar->monitor_started)
6948 ath10k_monitor_stop(ar);
6949
6950 for (i = 0; i < n_vifs; i++) {
6951 arvif = ath10k_vif_to_arvif(vifs[i].vif);
6952
6953 ath10k_dbg(ar, ATH10K_DBG_MAC,
6954 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
6955 arvif->vdev_id,
6956 vifs[i].old_ctx->def.chan->center_freq,
6957 vifs[i].new_ctx->def.chan->center_freq,
6958 vifs[i].old_ctx->def.width,
6959 vifs[i].new_ctx->def.width);
6960
6961 if (WARN_ON(!arvif->is_started))
6962 continue;
6963
6964 if (WARN_ON(!arvif->is_up))
6965 continue;
6966
6967 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
6968 if (ret) {
6969 ath10k_warn(ar, "failed to down vdev %d: %d\n",
6970 arvif->vdev_id, ret);
6971 continue;
6972 }
6973 }
6974
6975 /* All relevant vdevs are downed and associated channel resources
6976 * should be available for the channel switch now.
6977 */
6978
6979 spin_lock_bh(&ar->data_lock);
6980 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
6981 spin_unlock_bh(&ar->data_lock);
6982
6983 for (i = 0; i < n_vifs; i++) {
6984 arvif = ath10k_vif_to_arvif(vifs[i].vif);
6985
6986 if (WARN_ON(!arvif->is_started))
6987 continue;
6988
6989 if (WARN_ON(!arvif->is_up))
6990 continue;
6991
6992 ret = ath10k_mac_setup_bcn_tmpl(arvif);
6993 if (ret)
6994 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
6995 ret);
6996
6997 ret = ath10k_mac_setup_prb_tmpl(arvif);
6998 if (ret)
6999 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7000 ret);
7001
7002 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7003 if (ret) {
7004 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7005 arvif->vdev_id, ret);
7006 continue;
7007 }
7008
7009 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7010 arvif->bssid);
7011 if (ret) {
7012 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7013 arvif->vdev_id, ret);
7014 continue;
7015 }
7016 }
7017
7018 ath10k_monitor_recalc(ar);
7019 }
7020
7021 static int
7022 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7023 struct ieee80211_chanctx_conf *ctx)
7024 {
7025 struct ath10k *ar = hw->priv;
7026
7027 ath10k_dbg(ar, ATH10K_DBG_MAC,
7028 "mac chanctx add freq %hu width %d ptr %p\n",
7029 ctx->def.chan->center_freq, ctx->def.width, ctx);
7030
7031 mutex_lock(&ar->conf_mutex);
7032
7033 spin_lock_bh(&ar->data_lock);
7034 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7035 spin_unlock_bh(&ar->data_lock);
7036
7037 ath10k_recalc_radar_detection(ar);
7038 ath10k_monitor_recalc(ar);
7039
7040 mutex_unlock(&ar->conf_mutex);
7041
7042 return 0;
7043 }
7044
7045 static void
7046 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7047 struct ieee80211_chanctx_conf *ctx)
7048 {
7049 struct ath10k *ar = hw->priv;
7050
7051 ath10k_dbg(ar, ATH10K_DBG_MAC,
7052 "mac chanctx remove freq %hu width %d ptr %p\n",
7053 ctx->def.chan->center_freq, ctx->def.width, ctx);
7054
7055 mutex_lock(&ar->conf_mutex);
7056
7057 spin_lock_bh(&ar->data_lock);
7058 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7059 spin_unlock_bh(&ar->data_lock);
7060
7061 ath10k_recalc_radar_detection(ar);
7062 ath10k_monitor_recalc(ar);
7063
7064 mutex_unlock(&ar->conf_mutex);
7065 }
7066
7067 struct ath10k_mac_change_chanctx_arg {
7068 struct ieee80211_chanctx_conf *ctx;
7069 struct ieee80211_vif_chanctx_switch *vifs;
7070 int n_vifs;
7071 int next_vif;
7072 };
7073
7074 static void
7075 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7076 struct ieee80211_vif *vif)
7077 {
7078 struct ath10k_mac_change_chanctx_arg *arg = data;
7079
7080 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7081 return;
7082
7083 arg->n_vifs++;
7084 }
7085
7086 static void
7087 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7088 struct ieee80211_vif *vif)
7089 {
7090 struct ath10k_mac_change_chanctx_arg *arg = data;
7091 struct ieee80211_chanctx_conf *ctx;
7092
7093 ctx = rcu_access_pointer(vif->chanctx_conf);
7094 if (ctx != arg->ctx)
7095 return;
7096
7097 if (WARN_ON(arg->next_vif == arg->n_vifs))
7098 return;
7099
7100 arg->vifs[arg->next_vif].vif = vif;
7101 arg->vifs[arg->next_vif].old_ctx = ctx;
7102 arg->vifs[arg->next_vif].new_ctx = ctx;
7103 arg->next_vif++;
7104 }
7105
7106 static void
7107 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7108 struct ieee80211_chanctx_conf *ctx,
7109 u32 changed)
7110 {
7111 struct ath10k *ar = hw->priv;
7112 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7113
7114 mutex_lock(&ar->conf_mutex);
7115
7116 ath10k_dbg(ar, ATH10K_DBG_MAC,
7117 "mac chanctx change freq %hu width %d ptr %p changed %x\n",
7118 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7119
7120 /* This shouldn't really happen because channel switching should use
7121 * switch_vif_chanctx().
7122 */
7123 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7124 goto unlock;
7125
7126 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7127 ieee80211_iterate_active_interfaces_atomic(
7128 hw,
7129 IEEE80211_IFACE_ITER_NORMAL,
7130 ath10k_mac_change_chanctx_cnt_iter,
7131 &arg);
7132 if (arg.n_vifs == 0)
7133 goto radar;
7134
7135 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7136 GFP_KERNEL);
7137 if (!arg.vifs)
7138 goto radar;
7139
7140 ieee80211_iterate_active_interfaces_atomic(
7141 hw,
7142 IEEE80211_IFACE_ITER_NORMAL,
7143 ath10k_mac_change_chanctx_fill_iter,
7144 &arg);
7145 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7146 kfree(arg.vifs);
7147 }
7148
7149 radar:
7150 ath10k_recalc_radar_detection(ar);
7151
7152 /* FIXME: How to configure Rx chains properly? */
7153
7154 /* No other actions are actually necessary. Firmware maintains channel
7155 * definitions per vdev internally and there's no host-side channel
7156 * context abstraction to configure, e.g. channel width.
7157 */
7158
7159 unlock:
7160 mutex_unlock(&ar->conf_mutex);
7161 }
7162
7163 static int
7164 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7165 struct ieee80211_vif *vif,
7166 struct ieee80211_chanctx_conf *ctx)
7167 {
7168 struct ath10k *ar = hw->priv;
7169 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7170 int ret;
7171
7172 mutex_lock(&ar->conf_mutex);
7173
7174 ath10k_dbg(ar, ATH10K_DBG_MAC,
7175 "mac chanctx assign ptr %p vdev_id %i\n",
7176 ctx, arvif->vdev_id);
7177
7178 if (WARN_ON(arvif->is_started)) {
7179 mutex_unlock(&ar->conf_mutex);
7180 return -EBUSY;
7181 }
7182
7183 ret = ath10k_vdev_start(arvif, &ctx->def);
7184 if (ret) {
7185 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7186 arvif->vdev_id, vif->addr,
7187 ctx->def.chan->center_freq, ret);
7188 goto err;
7189 }
7190
7191 arvif->is_started = true;
7192
7193 ret = ath10k_mac_vif_setup_ps(arvif);
7194 if (ret) {
7195 ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7196 arvif->vdev_id, ret);
7197 goto err_stop;
7198 }
7199
7200 if (vif->type == NL80211_IFTYPE_MONITOR) {
7201 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7202 if (ret) {
7203 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7204 arvif->vdev_id, ret);
7205 goto err_stop;
7206 }
7207
7208 arvif->is_up = true;
7209 }
7210
7211 mutex_unlock(&ar->conf_mutex);
7212 return 0;
7213
7214 err_stop:
7215 ath10k_vdev_stop(arvif);
7216 arvif->is_started = false;
7217 ath10k_mac_vif_setup_ps(arvif);
7218
7219 err:
7220 mutex_unlock(&ar->conf_mutex);
7221 return ret;
7222 }
7223
7224 static void
7225 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7226 struct ieee80211_vif *vif,
7227 struct ieee80211_chanctx_conf *ctx)
7228 {
7229 struct ath10k *ar = hw->priv;
7230 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7231 int ret;
7232
7233 mutex_lock(&ar->conf_mutex);
7234
7235 ath10k_dbg(ar, ATH10K_DBG_MAC,
7236 "mac chanctx unassign ptr %p vdev_id %i\n",
7237 ctx, arvif->vdev_id);
7238
7239 WARN_ON(!arvif->is_started);
7240
7241 if (vif->type == NL80211_IFTYPE_MONITOR) {
7242 WARN_ON(!arvif->is_up);
7243
7244 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7245 if (ret)
7246 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7247 arvif->vdev_id, ret);
7248
7249 arvif->is_up = false;
7250 }
7251
7252 ret = ath10k_vdev_stop(arvif);
7253 if (ret)
7254 ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7255 arvif->vdev_id, ret);
7256
7257 arvif->is_started = false;
7258
7259 mutex_unlock(&ar->conf_mutex);
7260 }
7261
7262 static int
7263 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7264 struct ieee80211_vif_chanctx_switch *vifs,
7265 int n_vifs,
7266 enum ieee80211_chanctx_switch_mode mode)
7267 {
7268 struct ath10k *ar = hw->priv;
7269
7270 mutex_lock(&ar->conf_mutex);
7271
7272 ath10k_dbg(ar, ATH10K_DBG_MAC,
7273 "mac chanctx switch n_vifs %d mode %d\n",
7274 n_vifs, mode);
7275 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7276
7277 mutex_unlock(&ar->conf_mutex);
7278 return 0;
7279 }
7280
7281 static const struct ieee80211_ops ath10k_ops = {
7282 .tx = ath10k_mac_op_tx,
7283 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
7284 .start = ath10k_start,
7285 .stop = ath10k_stop,
7286 .config = ath10k_config,
7287 .add_interface = ath10k_add_interface,
7288 .remove_interface = ath10k_remove_interface,
7289 .configure_filter = ath10k_configure_filter,
7290 .bss_info_changed = ath10k_bss_info_changed,
7291 .hw_scan = ath10k_hw_scan,
7292 .cancel_hw_scan = ath10k_cancel_hw_scan,
7293 .set_key = ath10k_set_key,
7294 .set_default_unicast_key = ath10k_set_default_unicast_key,
7295 .sta_state = ath10k_sta_state,
7296 .conf_tx = ath10k_conf_tx,
7297 .remain_on_channel = ath10k_remain_on_channel,
7298 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
7299 .set_rts_threshold = ath10k_set_rts_threshold,
7300 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
7301 .flush = ath10k_flush,
7302 .tx_last_beacon = ath10k_tx_last_beacon,
7303 .set_antenna = ath10k_set_antenna,
7304 .get_antenna = ath10k_get_antenna,
7305 .reconfig_complete = ath10k_reconfig_complete,
7306 .get_survey = ath10k_get_survey,
7307 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
7308 .sta_rc_update = ath10k_sta_rc_update,
7309 .get_tsf = ath10k_get_tsf,
7310 .set_tsf = ath10k_set_tsf,
7311 .ampdu_action = ath10k_ampdu_action,
7312 .get_et_sset_count = ath10k_debug_get_et_sset_count,
7313 .get_et_stats = ath10k_debug_get_et_stats,
7314 .get_et_strings = ath10k_debug_get_et_strings,
7315 .add_chanctx = ath10k_mac_op_add_chanctx,
7316 .remove_chanctx = ath10k_mac_op_remove_chanctx,
7317 .change_chanctx = ath10k_mac_op_change_chanctx,
7318 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
7319 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
7320 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
7321
7322 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7323
7324 #ifdef CONFIG_PM
7325 .suspend = ath10k_wow_op_suspend,
7326 .resume = ath10k_wow_op_resume,
7327 #endif
7328 #ifdef CONFIG_MAC80211_DEBUGFS
7329 .sta_add_debugfs = ath10k_sta_add_debugfs,
7330 #endif
7331 };
7332
7333 #define CHAN2G(_channel, _freq, _flags) { \
7334 .band = NL80211_BAND_2GHZ, \
7335 .hw_value = (_channel), \
7336 .center_freq = (_freq), \
7337 .flags = (_flags), \
7338 .max_antenna_gain = 0, \
7339 .max_power = 30, \
7340 }
7341
7342 #define CHAN5G(_channel, _freq, _flags) { \
7343 .band = NL80211_BAND_5GHZ, \
7344 .hw_value = (_channel), \
7345 .center_freq = (_freq), \
7346 .flags = (_flags), \
7347 .max_antenna_gain = 0, \
7348 .max_power = 30, \
7349 }
7350
7351 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7352 CHAN2G(1, 2412, 0),
7353 CHAN2G(2, 2417, 0),
7354 CHAN2G(3, 2422, 0),
7355 CHAN2G(4, 2427, 0),
7356 CHAN2G(5, 2432, 0),
7357 CHAN2G(6, 2437, 0),
7358 CHAN2G(7, 2442, 0),
7359 CHAN2G(8, 2447, 0),
7360 CHAN2G(9, 2452, 0),
7361 CHAN2G(10, 2457, 0),
7362 CHAN2G(11, 2462, 0),
7363 CHAN2G(12, 2467, 0),
7364 CHAN2G(13, 2472, 0),
7365 CHAN2G(14, 2484, 0),
7366 };
7367
7368 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7369 CHAN5G(36, 5180, 0),
7370 CHAN5G(40, 5200, 0),
7371 CHAN5G(44, 5220, 0),
7372 CHAN5G(48, 5240, 0),
7373 CHAN5G(52, 5260, 0),
7374 CHAN5G(56, 5280, 0),
7375 CHAN5G(60, 5300, 0),
7376 CHAN5G(64, 5320, 0),
7377 CHAN5G(100, 5500, 0),
7378 CHAN5G(104, 5520, 0),
7379 CHAN5G(108, 5540, 0),
7380 CHAN5G(112, 5560, 0),
7381 CHAN5G(116, 5580, 0),
7382 CHAN5G(120, 5600, 0),
7383 CHAN5G(124, 5620, 0),
7384 CHAN5G(128, 5640, 0),
7385 CHAN5G(132, 5660, 0),
7386 CHAN5G(136, 5680, 0),
7387 CHAN5G(140, 5700, 0),
7388 CHAN5G(144, 5720, 0),
7389 CHAN5G(149, 5745, 0),
7390 CHAN5G(153, 5765, 0),
7391 CHAN5G(157, 5785, 0),
7392 CHAN5G(161, 5805, 0),
7393 CHAN5G(165, 5825, 0),
7394 };
7395
7396 struct ath10k *ath10k_mac_create(size_t priv_size)
7397 {
7398 struct ieee80211_hw *hw;
7399 struct ath10k *ar;
7400
7401 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
7402 if (!hw)
7403 return NULL;
7404
7405 ar = hw->priv;
7406 ar->hw = hw;
7407
7408 return ar;
7409 }
7410
7411 void ath10k_mac_destroy(struct ath10k *ar)
7412 {
7413 ieee80211_free_hw(ar->hw);
7414 }
7415
7416 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7417 {
7418 .max = 8,
7419 .types = BIT(NL80211_IFTYPE_STATION)
7420 | BIT(NL80211_IFTYPE_P2P_CLIENT)
7421 },
7422 {
7423 .max = 3,
7424 .types = BIT(NL80211_IFTYPE_P2P_GO)
7425 },
7426 {
7427 .max = 1,
7428 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
7429 },
7430 {
7431 .max = 7,
7432 .types = BIT(NL80211_IFTYPE_AP)
7433 #ifdef CONFIG_MAC80211_MESH
7434 | BIT(NL80211_IFTYPE_MESH_POINT)
7435 #endif
7436 },
7437 };
7438
7439 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7440 {
7441 .max = 8,
7442 .types = BIT(NL80211_IFTYPE_AP)
7443 #ifdef CONFIG_MAC80211_MESH
7444 | BIT(NL80211_IFTYPE_MESH_POINT)
7445 #endif
7446 },
7447 {
7448 .max = 1,
7449 .types = BIT(NL80211_IFTYPE_STATION)
7450 },
7451 };
7452
7453 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7454 {
7455 .limits = ath10k_if_limits,
7456 .n_limits = ARRAY_SIZE(ath10k_if_limits),
7457 .max_interfaces = 8,
7458 .num_different_channels = 1,
7459 .beacon_int_infra_match = true,
7460 },
7461 };
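/* As an example of what this combination allows: up to 8 interfaces in total
 * on a single channel, e.g. 7 AP vdevs plus 1 station, or a mix of stations,
 * P2P clients, up to 3 P2P GOs and one P2P Device, provided each per-type
 * limit and the 8-interface total are respected.
 */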
7462
7463 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7464 {
7465 .limits = ath10k_10x_if_limits,
7466 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7467 .max_interfaces = 8,
7468 .num_different_channels = 1,
7469 .beacon_int_infra_match = true,
7470 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7471 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7472 BIT(NL80211_CHAN_WIDTH_20) |
7473 BIT(NL80211_CHAN_WIDTH_40) |
7474 BIT(NL80211_CHAN_WIDTH_80),
7475 #endif
7476 },
7477 };
7478
7479 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7480 {
7481 .max = 2,
7482 .types = BIT(NL80211_IFTYPE_STATION),
7483 },
7484 {
7485 .max = 2,
7486 .types = BIT(NL80211_IFTYPE_AP) |
7487 #ifdef CONFIG_MAC80211_MESH
7488 BIT(NL80211_IFTYPE_MESH_POINT) |
7489 #endif
7490 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7491 BIT(NL80211_IFTYPE_P2P_GO),
7492 },
7493 {
7494 .max = 1,
7495 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7496 },
7497 };
7498
7499 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7500 {
7501 .max = 2,
7502 .types = BIT(NL80211_IFTYPE_STATION),
7503 },
7504 {
7505 .max = 2,
7506 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7507 },
7508 {
7509 .max = 1,
7510 .types = BIT(NL80211_IFTYPE_AP) |
7511 #ifdef CONFIG_MAC80211_MESH
7512 BIT(NL80211_IFTYPE_MESH_POINT) |
7513 #endif
7514 BIT(NL80211_IFTYPE_P2P_GO),
7515 },
7516 {
7517 .max = 1,
7518 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7519 },
7520 };
7521
7522 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7523 {
7524 .max = 1,
7525 .types = BIT(NL80211_IFTYPE_STATION),
7526 },
7527 {
7528 .max = 1,
7529 .types = BIT(NL80211_IFTYPE_ADHOC),
7530 },
7531 };
7532
7533 /* FIXME: This is not thoroughly tested. These combinations may over- or
7534 * underestimate hw/fw capabilities.
7535 */
7536 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7537 {
7538 .limits = ath10k_tlv_if_limit,
7539 .num_different_channels = 1,
7540 .max_interfaces = 4,
7541 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7542 },
7543 {
7544 .limits = ath10k_tlv_if_limit_ibss,
7545 .num_different_channels = 1,
7546 .max_interfaces = 2,
7547 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7548 },
7549 };
7550
7551 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7552 {
7553 .limits = ath10k_tlv_if_limit,
7554 .num_different_channels = 1,
7555 .max_interfaces = 4,
7556 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7557 },
7558 {
7559 .limits = ath10k_tlv_qcs_if_limit,
7560 .num_different_channels = 2,
7561 .max_interfaces = 4,
7562 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7563 },
7564 {
7565 .limits = ath10k_tlv_if_limit_ibss,
7566 .num_different_channels = 1,
7567 .max_interfaces = 2,
7568 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7569 },
7570 };
7571
7572 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7573 {
7574 .max = 1,
7575 .types = BIT(NL80211_IFTYPE_STATION),
7576 },
7577 {
7578 .max = 16,
7579 .types = BIT(NL80211_IFTYPE_AP)
7580 #ifdef CONFIG_MAC80211_MESH
7581 | BIT(NL80211_IFTYPE_MESH_POINT)
7582 #endif
7583 },
7584 };
7585
7586 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7587 {
7588 .limits = ath10k_10_4_if_limits,
7589 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7590 .max_interfaces = 16,
7591 .num_different_channels = 1,
7592 .beacon_int_infra_match = true,
7593 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7594 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7595 BIT(NL80211_CHAN_WIDTH_20) |
7596 BIT(NL80211_CHAN_WIDTH_40) |
7597 BIT(NL80211_CHAN_WIDTH_80),
7598 #endif
7599 },
7600 };
7601
7602 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7603 struct ieee80211_vif *vif)
7604 {
7605 struct ath10k_vif_iter *arvif_iter = data;
7606 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7607
7608 if (arvif->vdev_id == arvif_iter->vdev_id)
7609 arvif_iter->arvif = arvif;
7610 }
7611
7612 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7613 {
7614 struct ath10k_vif_iter arvif_iter;
7615 u32 flags;
7616
7617 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7618 arvif_iter.vdev_id = vdev_id;
7619
7620 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7621 ieee80211_iterate_active_interfaces_atomic(ar->hw,
7622 flags,
7623 ath10k_get_arvif_iter,
7624 &arvif_iter);
7625 if (!arvif_iter.arvif) {
7626 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
7627 return NULL;
7628 }
7629
7630 return arvif_iter.arvif;
7631 }
7632
7633 int ath10k_mac_register(struct ath10k *ar)
7634 {
7635 static const u32 cipher_suites[] = {
7636 WLAN_CIPHER_SUITE_WEP40,
7637 WLAN_CIPHER_SUITE_WEP104,
7638 WLAN_CIPHER_SUITE_TKIP,
7639 WLAN_CIPHER_SUITE_CCMP,
7640 WLAN_CIPHER_SUITE_AES_CMAC,
7641 };
7642 struct ieee80211_supported_band *band;
7643 void *channels;
7644 int ret;
7645
7646 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7647
7648 SET_IEEE80211_DEV(ar->hw, ar->dev);
7649
7650 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7651 ARRAY_SIZE(ath10k_5ghz_channels)) !=
7652 ATH10K_NUM_CHANS);
7653
7654 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7655 channels = kmemdup(ath10k_2ghz_channels,
7656 sizeof(ath10k_2ghz_channels),
7657 GFP_KERNEL);
7658 if (!channels) {
7659 ret = -ENOMEM;
7660 goto err_free;
7661 }
7662
7663 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7664 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7665 band->channels = channels;
7666 band->n_bitrates = ath10k_g_rates_size;
7667 band->bitrates = ath10k_g_rates;
7668
7669 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7670 }
7671
7672 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7673 channels = kmemdup(ath10k_5ghz_channels,
7674 sizeof(ath10k_5ghz_channels),
7675 GFP_KERNEL);
7676 if (!channels) {
7677 ret = -ENOMEM;
7678 goto err_free;
7679 }
7680
7681 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7682 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7683 band->channels = channels;
7684 band->n_bitrates = ath10k_a_rates_size;
7685 band->bitrates = ath10k_a_rates;
7686 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7687 }
7688
7689 ath10k_mac_setup_ht_vht_cap(ar);
7690
7691 ar->hw->wiphy->interface_modes =
7692 BIT(NL80211_IFTYPE_STATION) |
7693 BIT(NL80211_IFTYPE_AP) |
7694 BIT(NL80211_IFTYPE_MESH_POINT);
7695
7696 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7697 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7698
7699 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7700 ar->hw->wiphy->interface_modes |=
7701 BIT(NL80211_IFTYPE_P2P_DEVICE) |
7702 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7703 BIT(NL80211_IFTYPE_P2P_GO);
7704
7705 ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7706 ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7707 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7708 ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7709 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7710 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7711 ieee80211_hw_set(ar->hw, AP_LINK_PS);
7712 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
7713 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7714 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7715 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7716 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7717 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7718 ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
7719
7720 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7721 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
7722
7723 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
7724 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
7725
7726 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
7727 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
7728
7729 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
7730 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
7731 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
7732 }
7733
7734 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
7735 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
7736
7737 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
7738 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
7739 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
7740
7741 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7742
7743 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
7744 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
7745
7746 /* Firmware delivers WPS/P2P Probe Request frames to the driver so
7747 * that userspace (e.g. wpa_supplicant/hostapd) can generate
7748 * correct Probe Responses. Advertising the offload here is more of a hack.
7749 */
7750 ar->hw->wiphy->probe_resp_offload |=
7751 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
7752 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
7753 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
7754 }
7755
7756 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
7757 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7758
7759 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
7760 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
7761 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7762
7763 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
7764 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
7765 NL80211_FEATURE_AP_SCAN;
7766
7767 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7768
7769 ret = ath10k_wow_init(ar);
7770 if (ret) {
7771 ath10k_warn(ar, "failed to init wow: %d\n", ret);
7772 goto err_free;
7773 }
7774
7775 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
7776
7777 /*
7778 * On LL hardware, queues are managed entirely by the FW,
7779 * so we only advertise to mac80211 that we can handle the queues.
7780 */
7781 ar->hw->queues = IEEE80211_MAX_QUEUES;
7782
7783 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
7784 * something that vdev_ids can't reach so that we don't stop the queue
7785 * accidentally.
7786 */
7787 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
7788
7789 switch (ar->running_fw->fw_file.wmi_op_version) {
7790 case ATH10K_FW_WMI_OP_VERSION_MAIN:
7791 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
7792 ar->hw->wiphy->n_iface_combinations =
7793 ARRAY_SIZE(ath10k_if_comb);
7794 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7795 break;
7796 case ATH10K_FW_WMI_OP_VERSION_TLV:
7797 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
7798 ar->hw->wiphy->iface_combinations =
7799 ath10k_tlv_qcs_if_comb;
7800 ar->hw->wiphy->n_iface_combinations =
7801 ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
7802 } else {
7803 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
7804 ar->hw->wiphy->n_iface_combinations =
7805 ARRAY_SIZE(ath10k_tlv_if_comb);
7806 }
7807 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7808 break;
7809 case ATH10K_FW_WMI_OP_VERSION_10_1:
7810 case ATH10K_FW_WMI_OP_VERSION_10_2:
7811 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
7812 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
7813 ar->hw->wiphy->n_iface_combinations =
7814 ARRAY_SIZE(ath10k_10x_if_comb);
7815 break;
7816 case ATH10K_FW_WMI_OP_VERSION_10_4:
7817 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
7818 ar->hw->wiphy->n_iface_combinations =
7819 ARRAY_SIZE(ath10k_10_4_if_comb);
7820 break;
7821 case ATH10K_FW_WMI_OP_VERSION_UNSET:
7822 case ATH10K_FW_WMI_OP_VERSION_MAX:
7823 WARN_ON(1);
7824 ret = -EINVAL;
7825 goto err_free;
7826 }
7827
7828 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7829 ar->hw->netdev_features = NETIF_F_HW_CSUM;
7830
7831 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
7832 /* Init ath dfs pattern detector */
7833 ar->ath_common.debug_mask = ATH_DBG_DFS;
7834 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
7835 NL80211_DFS_UNSET);
7836
7837 if (!ar->dfs_detector)
7838 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
7839 }
7840
7841 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
7842 ath10k_reg_notifier);
7843 if (ret) {
7844 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
7845 goto err_dfs_detector_exit;
7846 }
7847
7848 ar->hw->wiphy->cipher_suites = cipher_suites;
7849 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
7850
7851 ret = ieee80211_register_hw(ar->hw);
7852 if (ret) {
7853 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
7854 goto err_dfs_detector_exit;
7855 }
7856
7857 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
7858 ret = regulatory_hint(ar->hw->wiphy,
7859 ar->ath_common.regulatory.alpha2);
7860 if (ret)
7861 goto err_unregister;
7862 }
7863
7864 return 0;
7865
7866 err_unregister:
7867 ieee80211_unregister_hw(ar->hw);
7868
7869 err_dfs_detector_exit:
7870 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7871 ar->dfs_detector->exit(ar->dfs_detector);
7872
7873 err_free:
7874 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7875 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7876
7877 SET_IEEE80211_DEV(ar->hw, NULL);
7878 return ret;
7879 }
7880
7881 void ath10k_mac_unregister(struct ath10k *ar)
7882 {
7883 ieee80211_unregister_hw(ar->hw);
7884
7885 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7886 ar->dfs_detector->exit(ar->dfs_detector);
7887
7888 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7889 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7890
7891 SET_IEEE80211_DEV(ar->hw, NULL);
7892 }