drivers/net/wireless/ath/ath10k/mac.c
1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "mac.h"
19
20 #include <net/mac80211.h>
21 #include <linux/etherdevice.h>
22
23 #include "hif.h"
24 #include "core.h"
25 #include "debug.h"
26 #include "wmi.h"
27 #include "htt.h"
28 #include "txrx.h"
29 #include "testmode.h"
30 #include "wmi.h"
31 #include "wmi-tlv.h"
32 #include "wmi-ops.h"
33 #include "wow.h"
34
35 /*********/
36 /* Rates */
37 /*********/
38
39 static struct ieee80211_rate ath10k_rates[] = {
40 { .bitrate = 10,
41 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
42 { .bitrate = 20,
43 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
44 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
45 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 { .bitrate = 55,
47 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
48 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
49 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
50 { .bitrate = 110,
51 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
52 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
53 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
54
55 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
56 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
57 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
58 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
59 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
60 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
61 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
62 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
63 };
64
65 static struct ieee80211_rate ath10k_rates_rev2[] = {
66 { .bitrate = 10,
67 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
68 { .bitrate = 20,
69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
70 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
71 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
72 { .bitrate = 55,
73 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
74 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
75 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
76 { .bitrate = 110,
77 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
78 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
79 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
80
81 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
82 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
83 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
84 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
85 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
86 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
87 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
88 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
89 };
90
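/* The first four entries in ath10k_rates[] are CCK rates, which are only
 * valid on 2.4 GHz; the OFDM rates that follow are shared with 5 GHz. The
 * "a" (5 GHz) rate table therefore starts at the first OFDM entry, while
 * the "g" (2.4 GHz) table uses the whole array.
 */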
91 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
92
93 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
94 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
95 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
96 #define ath10k_g_rates (ath10k_rates + 0)
97 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
98
99 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
100 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
101
102 static bool ath10k_mac_bitrate_is_cck(int bitrate)
103 {
104 switch (bitrate) {
105 case 10:
106 case 20:
107 case 55:
108 case 110:
109 return true;
110 }
111
112 return false;
113 }
114
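/* Convert a bitrate in 100 kbps units (as used by mac80211) into the legacy
 * rate code passed to the firmware: the rate in 500 kbps units, with bit 7
 * set to mark CCK rates.
 */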
115 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
116 {
117 return DIV_ROUND_UP(bitrate, 5) |
118 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
119 }
120
121 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
122 u8 hw_rate, bool cck)
123 {
124 const struct ieee80211_rate *rate;
125 int i;
126
127 for (i = 0; i < sband->n_bitrates; i++) {
128 rate = &sband->bitrates[i];
129
130 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
131 continue;
132
133 if (rate->hw_value == hw_rate)
134 return i;
135 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
136 rate->hw_value_short == hw_rate)
137 return i;
138 }
139
140 return 0;
141 }
142
143 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
144 u32 bitrate)
145 {
146 int i;
147
148 for (i = 0; i < sband->n_bitrates; i++)
149 if (sband->bitrates[i].bitrate == bitrate)
150 return i;
151
152 return 0;
153 }
154
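/* Decode the 2-bit per-NSS field of a VHT MCS map into a bitmask of the MCS
 * indices supported for that spatial stream (MCS 0-7, 0-8 or 0-9).
 */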
155 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
156 {
157 switch ((mcs_map >> (2 * nss)) & 0x3) {
158 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
159 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
160 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
161 }
162 return 0;
163 }
164
165 static u32
166 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
167 {
168 int nss;
169
170 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
171 if (ht_mcs_mask[nss])
172 return nss + 1;
173
174 return 1;
175 }
176
177 static u32
178 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
179 {
180 int nss;
181
182 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
183 if (vht_mcs_mask[nss])
184 return nss + 1;
185
186 return 1;
187 }
188
189 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
190 {
191 enum wmi_host_platform_type platform_type;
192 int ret;
193
194 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
195 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
196 else
197 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
198
199 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
200
201 if (ret && ret != -EOPNOTSUPP) {
202 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
203 return ret;
204 }
205
206 return 0;
207 }
208
209 /**********/
210 /* Crypto */
211 /**********/
212
213 static int ath10k_send_key(struct ath10k_vif *arvif,
214 struct ieee80211_key_conf *key,
215 enum set_key_cmd cmd,
216 const u8 *macaddr, u32 flags)
217 {
218 struct ath10k *ar = arvif->ar;
219 struct wmi_vdev_install_key_arg arg = {
220 .vdev_id = arvif->vdev_id,
221 .key_idx = key->keyidx,
222 .key_len = key->keylen,
223 .key_data = key->key,
224 .key_flags = flags,
225 .macaddr = macaddr,
226 };
227
228 lockdep_assert_held(&arvif->ar->conf_mutex);
229
230 switch (key->cipher) {
231 case WLAN_CIPHER_SUITE_CCMP:
232 arg.key_cipher = WMI_CIPHER_AES_CCM;
233 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
234 break;
235 case WLAN_CIPHER_SUITE_TKIP:
236 arg.key_cipher = WMI_CIPHER_TKIP;
237 arg.key_txmic_len = 8;
238 arg.key_rxmic_len = 8;
239 break;
240 case WLAN_CIPHER_SUITE_WEP40:
241 case WLAN_CIPHER_SUITE_WEP104:
242 arg.key_cipher = WMI_CIPHER_WEP;
243 break;
244 case WLAN_CIPHER_SUITE_AES_CMAC:
245 WARN_ON(1);
246 return -EINVAL;
247 default:
248 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
249 return -EOPNOTSUPP;
250 }
251
252 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
253 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
254
255 if (cmd == DISABLE_KEY) {
256 arg.key_cipher = WMI_CIPHER_NONE;
257 arg.key_data = NULL;
258 }
259
260 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
261 }
262
263 static int ath10k_install_key(struct ath10k_vif *arvif,
264 struct ieee80211_key_conf *key,
265 enum set_key_cmd cmd,
266 const u8 *macaddr, u32 flags)
267 {
268 struct ath10k *ar = arvif->ar;
269 int ret;
270 unsigned long time_left;
271
272 lockdep_assert_held(&ar->conf_mutex);
273
274 reinit_completion(&ar->install_key_done);
275
276 if (arvif->nohwcrypt)
277 return 1;
278
279 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
280 if (ret)
281 return ret;
282
283 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
284 if (time_left == 0)
285 return -ETIMEDOUT;
286
287 return 0;
288 }
289
290 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
291 const u8 *addr)
292 {
293 struct ath10k *ar = arvif->ar;
294 struct ath10k_peer *peer;
295 int ret;
296 int i;
297 u32 flags;
298
299 lockdep_assert_held(&ar->conf_mutex);
300
301 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
302 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
303 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
304 return -EINVAL;
305
306 spin_lock_bh(&ar->data_lock);
307 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
308 spin_unlock_bh(&ar->data_lock);
309
310 if (!peer)
311 return -ENOENT;
312
313 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
314 if (arvif->wep_keys[i] == NULL)
315 continue;
316
317 switch (arvif->vif->type) {
318 case NL80211_IFTYPE_AP:
319 flags = WMI_KEY_PAIRWISE;
320
321 if (arvif->def_wep_key_idx == i)
322 flags |= WMI_KEY_TX_USAGE;
323
324 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
325 SET_KEY, addr, flags);
326 if (ret < 0)
327 return ret;
328 break;
329 case NL80211_IFTYPE_ADHOC:
330 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
331 SET_KEY, addr,
332 WMI_KEY_PAIRWISE);
333 if (ret < 0)
334 return ret;
335
336 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
337 SET_KEY, addr, WMI_KEY_GROUP);
338 if (ret < 0)
339 return ret;
340 break;
341 default:
342 WARN_ON(1);
343 return -EINVAL;
344 }
345
346 spin_lock_bh(&ar->data_lock);
347 peer->keys[i] = arvif->wep_keys[i];
348 spin_unlock_bh(&ar->data_lock);
349 }
350
351 /* In some cases (notably with static WEP IBSS with multiple keys)
352 * multicast Tx becomes broken. Both pairwise and groupwise keys are
353 * installed already. Using WMI_KEY_TX_USAGE in different combinations
354 * didn't seem to help. Using def_keyid vdev parameter seems to be
355 * effective so use that.
356 *
357 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
358 */
359 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
360 return 0;
361
362 if (arvif->def_wep_key_idx == -1)
363 return 0;
364
365 ret = ath10k_wmi_vdev_set_param(arvif->ar,
366 arvif->vdev_id,
367 arvif->ar->wmi.vdev_param->def_keyid,
368 arvif->def_wep_key_idx);
369 if (ret) {
370 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
371 arvif->vdev_id, ret);
372 return ret;
373 }
374
375 return 0;
376 }
377
378 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
379 const u8 *addr)
380 {
381 struct ath10k *ar = arvif->ar;
382 struct ath10k_peer *peer;
383 int first_errno = 0;
384 int ret;
385 int i;
386 u32 flags = 0;
387
388 lockdep_assert_held(&ar->conf_mutex);
389
390 spin_lock_bh(&ar->data_lock);
391 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
392 spin_unlock_bh(&ar->data_lock);
393
394 if (!peer)
395 return -ENOENT;
396
397 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
398 if (peer->keys[i] == NULL)
399 continue;
400
401 /* key flags are not required to delete the key */
402 ret = ath10k_install_key(arvif, peer->keys[i],
403 DISABLE_KEY, addr, flags);
404 if (ret < 0 && first_errno == 0)
405 first_errno = ret;
406
407 if (ret < 0)
408 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
409 i, ret);
410
411 spin_lock_bh(&ar->data_lock);
412 peer->keys[i] = NULL;
413 spin_unlock_bh(&ar->data_lock);
414 }
415
416 return first_errno;
417 }
418
419 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
420 u8 keyidx)
421 {
422 struct ath10k_peer *peer;
423 int i;
424
425 lockdep_assert_held(&ar->data_lock);
426
427 /* We don't know which vdev this peer belongs to,
428 * since WMI doesn't give us that information.
429 *
430 * FIXME: multi-bss needs to be handled.
431 */
432 peer = ath10k_peer_find(ar, 0, addr);
433 if (!peer)
434 return false;
435
436 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
437 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
438 return true;
439 }
440
441 return false;
442 }
443
444 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
445 struct ieee80211_key_conf *key)
446 {
447 struct ath10k *ar = arvif->ar;
448 struct ath10k_peer *peer;
449 u8 addr[ETH_ALEN];
450 int first_errno = 0;
451 int ret;
452 int i;
453 u32 flags = 0;
454
455 lockdep_assert_held(&ar->conf_mutex);
456
457 for (;;) {
458 /* since ath10k_install_key() can sleep we can't hold data_lock all
459 * the time, so we try to remove the keys incrementally */
460 spin_lock_bh(&ar->data_lock);
461 i = 0;
462 list_for_each_entry(peer, &ar->peers, list) {
463 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
464 if (peer->keys[i] == key) {
465 ether_addr_copy(addr, peer->addr);
466 peer->keys[i] = NULL;
467 break;
468 }
469 }
470
471 if (i < ARRAY_SIZE(peer->keys))
472 break;
473 }
474 spin_unlock_bh(&ar->data_lock);
475
476 if (i == ARRAY_SIZE(peer->keys))
477 break;
478 /* key flags are not required to delete the key */
479 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
480 if (ret < 0 && first_errno == 0)
481 first_errno = ret;
482
483 if (ret)
484 ath10k_warn(ar, "failed to remove key for %pM: %d\n",
485 addr, ret);
486 }
487
488 return first_errno;
489 }
490
491 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
492 struct ieee80211_key_conf *key)
493 {
494 struct ath10k *ar = arvif->ar;
495 struct ath10k_peer *peer;
496 int ret;
497
498 lockdep_assert_held(&ar->conf_mutex);
499
500 list_for_each_entry(peer, &ar->peers, list) {
501 if (ether_addr_equal(peer->addr, arvif->vif->addr))
502 continue;
503
504 if (ether_addr_equal(peer->addr, arvif->bssid))
505 continue;
506
507 if (peer->keys[key->keyidx] == key)
508 continue;
509
510 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i key %i needs update\n",
511 arvif->vdev_id, key->keyidx);
512
513 ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
514 if (ret) {
515 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
516 arvif->vdev_id, peer->addr, ret);
517 return ret;
518 }
519 }
520
521 return 0;
522 }
523
524 /*********************/
525 /* General utilities */
526 /*********************/
527
528 static inline enum wmi_phy_mode
529 chan_to_phymode(const struct cfg80211_chan_def *chandef)
530 {
531 enum wmi_phy_mode phymode = MODE_UNKNOWN;
532
533 switch (chandef->chan->band) {
534 case NL80211_BAND_2GHZ:
535 switch (chandef->width) {
536 case NL80211_CHAN_WIDTH_20_NOHT:
537 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
538 phymode = MODE_11B;
539 else
540 phymode = MODE_11G;
541 break;
542 case NL80211_CHAN_WIDTH_20:
543 phymode = MODE_11NG_HT20;
544 break;
545 case NL80211_CHAN_WIDTH_40:
546 phymode = MODE_11NG_HT40;
547 break;
548 case NL80211_CHAN_WIDTH_5:
549 case NL80211_CHAN_WIDTH_10:
550 case NL80211_CHAN_WIDTH_80:
551 case NL80211_CHAN_WIDTH_80P80:
552 case NL80211_CHAN_WIDTH_160:
553 phymode = MODE_UNKNOWN;
554 break;
555 }
556 break;
557 case NL80211_BAND_5GHZ:
558 switch (chandef->width) {
559 case NL80211_CHAN_WIDTH_20_NOHT:
560 phymode = MODE_11A;
561 break;
562 case NL80211_CHAN_WIDTH_20:
563 phymode = MODE_11NA_HT20;
564 break;
565 case NL80211_CHAN_WIDTH_40:
566 phymode = MODE_11NA_HT40;
567 break;
568 case NL80211_CHAN_WIDTH_80:
569 phymode = MODE_11AC_VHT80;
570 break;
571 case NL80211_CHAN_WIDTH_5:
572 case NL80211_CHAN_WIDTH_10:
573 case NL80211_CHAN_WIDTH_80P80:
574 case NL80211_CHAN_WIDTH_160:
575 phymode = MODE_UNKNOWN;
576 break;
577 }
578 break;
579 default:
580 break;
581 }
582
583 WARN_ON(phymode == MODE_UNKNOWN);
584 return phymode;
585 }
586
587 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
588 {
589 /*
590 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
591 * 0 for no restriction
592 * 1 for 1/4 us
593 * 2 for 1/2 us
594 * 3 for 1 us
595 * 4 for 2 us
596 * 5 for 4 us
597 * 6 for 8 us
598 * 7 for 16 us
599 */
600 switch (mpdudensity) {
601 case 0:
602 return 0;
603 case 1:
604 case 2:
605 case 3:
606 /* Our lower layer calculations limit our precision to
607 * 1 microsecond */
608 return 1;
609 case 4:
610 return 2;
611 case 5:
612 return 4;
613 case 6:
614 return 8;
615 case 7:
616 return 16;
617 default:
618 return 0;
619 }
620 }
621
622 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
623 struct cfg80211_chan_def *def)
624 {
625 struct ieee80211_chanctx_conf *conf;
626
627 rcu_read_lock();
628 conf = rcu_dereference(vif->chanctx_conf);
629 if (!conf) {
630 rcu_read_unlock();
631 return -ENOENT;
632 }
633
634 *def = conf->def;
635 rcu_read_unlock();
636
637 return 0;
638 }
639
640 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
641 struct ieee80211_chanctx_conf *conf,
642 void *data)
643 {
644 int *num = data;
645
646 (*num)++;
647 }
648
649 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
650 {
651 int num = 0;
652
653 ieee80211_iter_chan_contexts_atomic(ar->hw,
654 ath10k_mac_num_chanctxs_iter,
655 &num);
656
657 return num;
658 }
659
660 static void
661 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
662 struct ieee80211_chanctx_conf *conf,
663 void *data)
664 {
665 struct cfg80211_chan_def **def = data;
666
667 *def = &conf->def;
668 }
669
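/* Create a firmware peer entry and wait until it shows up in ar->peers
 * (populated once the firmware acknowledges the new peer) before recording
 * the owning vif/sta pointers.
 */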
670 static int ath10k_peer_create(struct ath10k *ar,
671 struct ieee80211_vif *vif,
672 struct ieee80211_sta *sta,
673 u32 vdev_id,
674 const u8 *addr,
675 enum wmi_peer_type peer_type)
676 {
677 struct ath10k_vif *arvif;
678 struct ath10k_peer *peer;
679 int num_peers = 0;
680 int ret;
681
682 lockdep_assert_held(&ar->conf_mutex);
683
684 num_peers = ar->num_peers;
685
686 /* Each vdev consumes a peer entry as well */
687 list_for_each_entry(arvif, &ar->arvifs, list)
688 num_peers++;
689
690 if (num_peers >= ar->max_num_peers)
691 return -ENOBUFS;
692
693 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
694 if (ret) {
695 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
696 addr, vdev_id, ret);
697 return ret;
698 }
699
700 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
701 if (ret) {
702 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
703 addr, vdev_id, ret);
704 return ret;
705 }
706
707 spin_lock_bh(&ar->data_lock);
708
709 peer = ath10k_peer_find(ar, vdev_id, addr);
710 if (!peer) {
711 spin_unlock_bh(&ar->data_lock);
712 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
713 addr, vdev_id);
714 ath10k_wmi_peer_delete(ar, vdev_id, addr);
715 return -ENOENT;
716 }
717
718 peer->vif = vif;
719 peer->sta = sta;
720
721 spin_unlock_bh(&ar->data_lock);
722
723 ar->num_peers++;
724
725 return 0;
726 }
727
728 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
729 {
730 struct ath10k *ar = arvif->ar;
731 u32 param;
732 int ret;
733
734 param = ar->wmi.pdev_param->sta_kickout_th;
735 ret = ath10k_wmi_pdev_set_param(ar, param,
736 ATH10K_KICKOUT_THRESHOLD);
737 if (ret) {
738 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
739 arvif->vdev_id, ret);
740 return ret;
741 }
742
743 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
744 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
745 ATH10K_KEEPALIVE_MIN_IDLE);
746 if (ret) {
747 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
748 arvif->vdev_id, ret);
749 return ret;
750 }
751
752 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
753 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
754 ATH10K_KEEPALIVE_MAX_IDLE);
755 if (ret) {
756 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
757 arvif->vdev_id, ret);
758 return ret;
759 }
760
761 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
762 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
763 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
764 if (ret) {
765 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
766 arvif->vdev_id, ret);
767 return ret;
768 }
769
770 return 0;
771 }
772
773 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
774 {
775 struct ath10k *ar = arvif->ar;
776 u32 vdev_param;
777
778 vdev_param = ar->wmi.vdev_param->rts_threshold;
779 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
780 }
781
782 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
783 {
784 int ret;
785
786 lockdep_assert_held(&ar->conf_mutex);
787
788 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
789 if (ret)
790 return ret;
791
792 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
793 if (ret)
794 return ret;
795
796 ar->num_peers--;
797
798 return 0;
799 }
800
801 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
802 {
803 struct ath10k_peer *peer, *tmp;
804 int peer_id;
805
806 lockdep_assert_held(&ar->conf_mutex);
807
808 spin_lock_bh(&ar->data_lock);
809 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
810 if (peer->vdev_id != vdev_id)
811 continue;
812
813 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
814 peer->addr, vdev_id);
815
816 for_each_set_bit(peer_id, peer->peer_ids,
817 ATH10K_MAX_NUM_PEER_IDS) {
818 ar->peer_map[peer_id] = NULL;
819 }
820
821 list_del(&peer->list);
822 kfree(peer);
823 ar->num_peers--;
824 }
825 spin_unlock_bh(&ar->data_lock);
826 }
827
828 static void ath10k_peer_cleanup_all(struct ath10k *ar)
829 {
830 struct ath10k_peer *peer, *tmp;
831
832 lockdep_assert_held(&ar->conf_mutex);
833
834 spin_lock_bh(&ar->data_lock);
835 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
836 list_del(&peer->list);
837 kfree(peer);
838 }
839 spin_unlock_bh(&ar->data_lock);
840
841 ar->num_peers = 0;
842 ar->num_stations = 0;
843 }
844
845 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
846 struct ieee80211_sta *sta,
847 enum wmi_tdls_peer_state state)
848 {
849 int ret;
850 struct wmi_tdls_peer_update_cmd_arg arg = {};
851 struct wmi_tdls_peer_capab_arg cap = {};
852 struct wmi_channel_arg chan_arg = {};
853
854 lockdep_assert_held(&ar->conf_mutex);
855
856 arg.vdev_id = vdev_id;
857 arg.peer_state = state;
858 ether_addr_copy(arg.addr, sta->addr);
859
860 cap.peer_max_sp = sta->max_sp;
861 cap.peer_uapsd_queues = sta->uapsd_queues;
862
863 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
864 !sta->tdls_initiator)
865 cap.is_peer_responder = 1;
866
867 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
868 if (ret) {
869 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
870 arg.addr, vdev_id, ret);
871 return ret;
872 }
873
874 return 0;
875 }
876
877 /************************/
878 /* Interface management */
879 /************************/
880
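/* Release a pending beacon. Beacons placed in the preallocated DMA-coherent
 * beacon_buf are not mapped per-skb, hence the conditional unmap below.
 */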
881 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
882 {
883 struct ath10k *ar = arvif->ar;
884
885 lockdep_assert_held(&ar->data_lock);
886
887 if (!arvif->beacon)
888 return;
889
890 if (!arvif->beacon_buf)
891 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
892 arvif->beacon->len, DMA_TO_DEVICE);
893
894 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
895 arvif->beacon_state != ATH10K_BEACON_SENT))
896 return;
897
898 dev_kfree_skb_any(arvif->beacon);
899
900 arvif->beacon = NULL;
901 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
902 }
903
904 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
905 {
906 struct ath10k *ar = arvif->ar;
907
908 lockdep_assert_held(&ar->data_lock);
909
910 ath10k_mac_vif_beacon_free(arvif);
911
912 if (arvif->beacon_buf) {
913 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
914 arvif->beacon_buf, arvif->beacon_paddr);
915 arvif->beacon_buf = NULL;
916 }
917 }
918
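/* Wait for the firmware to acknowledge a vdev start/restart/stop request.
 * The completion is signalled from the corresponding WMI event handlers.
 */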
919 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
920 {
921 unsigned long time_left;
922
923 lockdep_assert_held(&ar->conf_mutex);
924
925 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
926 return -ESHUTDOWN;
927
928 time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
929 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
930 if (time_left == 0)
931 return -ETIMEDOUT;
932
933 return 0;
934 }
935
936 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
937 {
938 struct cfg80211_chan_def *chandef = NULL;
939 struct ieee80211_channel *channel = NULL;
940 struct wmi_vdev_start_request_arg arg = {};
941 int ret = 0;
942
943 lockdep_assert_held(&ar->conf_mutex);
944
945 ieee80211_iter_chan_contexts_atomic(ar->hw,
946 ath10k_mac_get_any_chandef_iter,
947 &chandef);
948 if (WARN_ON_ONCE(!chandef))
949 return -ENOENT;
950
951 channel = chandef->chan;
952
953 arg.vdev_id = vdev_id;
954 arg.channel.freq = channel->center_freq;
955 arg.channel.band_center_freq1 = chandef->center_freq1;
956
957 /* TODO: set this up dynamically; what if we
958 * don't have any vifs? */
959 arg.channel.mode = chan_to_phymode(chandef);
960 arg.channel.chan_radar =
961 !!(channel->flags & IEEE80211_CHAN_RADAR);
962
963 arg.channel.min_power = 0;
964 arg.channel.max_power = channel->max_power * 2;
965 arg.channel.max_reg_power = channel->max_reg_power * 2;
966 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
967
968 reinit_completion(&ar->vdev_setup_done);
969
970 ret = ath10k_wmi_vdev_start(ar, &arg);
971 if (ret) {
972 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
973 vdev_id, ret);
974 return ret;
975 }
976
977 ret = ath10k_vdev_setup_sync(ar);
978 if (ret) {
979 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
980 vdev_id, ret);
981 return ret;
982 }
983
984 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
985 if (ret) {
986 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
987 vdev_id, ret);
988 goto vdev_stop;
989 }
990
991 ar->monitor_vdev_id = vdev_id;
992
993 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
994 ar->monitor_vdev_id);
995 return 0;
996
997 vdev_stop:
998 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
999 if (ret)
1000 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1001 ar->monitor_vdev_id, ret);
1002
1003 return ret;
1004 }
1005
1006 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1007 {
1008 int ret = 0;
1009
1010 lockdep_assert_held(&ar->conf_mutex);
1011
1012 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1013 if (ret)
1014 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1015 ar->monitor_vdev_id, ret);
1016
1017 reinit_completion(&ar->vdev_setup_done);
1018
1019 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1020 if (ret)
1021 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
1022 ar->monitor_vdev_id, ret);
1023
1024 ret = ath10k_vdev_setup_sync(ar);
1025 if (ret)
1026 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1027 ar->monitor_vdev_id, ret);
1028
1029 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1030 ar->monitor_vdev_id);
1031 return ret;
1032 }
1033
1034 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1035 {
1036 int bit, ret = 0;
1037
1038 lockdep_assert_held(&ar->conf_mutex);
1039
1040 if (ar->free_vdev_map == 0) {
1041 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1042 return -ENOMEM;
1043 }
1044
1045 bit = __ffs64(ar->free_vdev_map);
1046
1047 ar->monitor_vdev_id = bit;
1048
1049 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1050 WMI_VDEV_TYPE_MONITOR,
1051 0, ar->mac_addr);
1052 if (ret) {
1053 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1054 ar->monitor_vdev_id, ret);
1055 return ret;
1056 }
1057
1058 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1059 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1060 ar->monitor_vdev_id);
1061
1062 return 0;
1063 }
1064
1065 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1066 {
1067 int ret = 0;
1068
1069 lockdep_assert_held(&ar->conf_mutex);
1070
1071 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1072 if (ret) {
1073 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1074 ar->monitor_vdev_id, ret);
1075 return ret;
1076 }
1077
1078 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1079
1080 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1081 ar->monitor_vdev_id);
1082 return ret;
1083 }
1084
1085 static int ath10k_monitor_start(struct ath10k *ar)
1086 {
1087 int ret;
1088
1089 lockdep_assert_held(&ar->conf_mutex);
1090
1091 ret = ath10k_monitor_vdev_create(ar);
1092 if (ret) {
1093 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1094 return ret;
1095 }
1096
1097 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1098 if (ret) {
1099 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1100 ath10k_monitor_vdev_delete(ar);
1101 return ret;
1102 }
1103
1104 ar->monitor_started = true;
1105 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1106
1107 return 0;
1108 }
1109
1110 static int ath10k_monitor_stop(struct ath10k *ar)
1111 {
1112 int ret;
1113
1114 lockdep_assert_held(&ar->conf_mutex);
1115
1116 ret = ath10k_monitor_vdev_stop(ar);
1117 if (ret) {
1118 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1119 return ret;
1120 }
1121
1122 ret = ath10k_monitor_vdev_delete(ar);
1123 if (ret) {
1124 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1125 return ret;
1126 }
1127
1128 ar->monitor_started = false;
1129 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1130
1131 return 0;
1132 }
1133
1134 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1135 {
1136 int num_ctx;
1137
1138 /* At least one chanctx is required to derive a channel to start
1139 * monitor vdev on.
1140 */
1141 num_ctx = ath10k_mac_num_chanctxs(ar);
1142 if (num_ctx == 0)
1143 return false;
1144
1145 /* If there's already an existing special monitor interface then don't
1146 * bother creating another monitor vdev.
1147 */
1148 if (ar->monitor_arvif)
1149 return false;
1150
1151 return ar->monitor ||
1152 ar->filter_flags & FIF_OTHER_BSS ||
1153 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1154 }
1155
1156 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1157 {
1158 int num_ctx;
1159
1160 num_ctx = ath10k_mac_num_chanctxs(ar);
1161
1162 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1163 * shouldn't allow this but make sure to prevent handling the following
1164 * case anyway since multi-channel DFS hasn't been tested at all.
1165 */
1166 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1167 return false;
1168
1169 return true;
1170 }
1171
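/* Reconcile the monitor vdev state with the current configuration: a monitor
 * vdev is needed for monitor mode, FIF_OTHER_BSS filtering or CAC, and is
 * started or stopped here accordingly.
 */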
1172 static int ath10k_monitor_recalc(struct ath10k *ar)
1173 {
1174 bool needed;
1175 bool allowed;
1176 int ret;
1177
1178 lockdep_assert_held(&ar->conf_mutex);
1179
1180 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1181 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1182
1183 ath10k_dbg(ar, ATH10K_DBG_MAC,
1184 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1185 ar->monitor_started, needed, allowed);
1186
1187 if (WARN_ON(needed && !allowed)) {
1188 if (ar->monitor_started) {
1189 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1190
1191 ret = ath10k_monitor_stop(ar);
1192 if (ret)
1193 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1194 ret);
1195 /* not serious */
1196 }
1197
1198 return -EPERM;
1199 }
1200
1201 if (needed == ar->monitor_started)
1202 return 0;
1203
1204 if (needed)
1205 return ath10k_monitor_start(ar);
1206 else
1207 return ath10k_monitor_stop(ar);
1208 }
1209
1210 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1211 {
1212 struct ath10k *ar = arvif->ar;
1213 u32 vdev_param, rts_cts = 0;
1214
1215 lockdep_assert_held(&ar->conf_mutex);
1216
1217 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1218
1219 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1220
1221 if (arvif->num_legacy_stations > 0)
1222 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1223 WMI_RTSCTS_PROFILE);
1224 else
1225 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1226 WMI_RTSCTS_PROFILE);
1227
1228 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1229 rts_cts);
1230 }
1231
1232 static int ath10k_start_cac(struct ath10k *ar)
1233 {
1234 int ret;
1235
1236 lockdep_assert_held(&ar->conf_mutex);
1237
1238 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1239
1240 ret = ath10k_monitor_recalc(ar);
1241 if (ret) {
1242 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1243 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1244 return ret;
1245 }
1246
1247 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1248 ar->monitor_vdev_id);
1249
1250 return 0;
1251 }
1252
1253 static int ath10k_stop_cac(struct ath10k *ar)
1254 {
1255 lockdep_assert_held(&ar->conf_mutex);
1256
1257 /* CAC is not running - do nothing */
1258 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1259 return 0;
1260
1261 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1262 ath10k_monitor_stop(ar);
1263
1264 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1265
1266 return 0;
1267 }
1268
1269 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1270 struct ieee80211_chanctx_conf *conf,
1271 void *data)
1272 {
1273 bool *ret = data;
1274
1275 if (!*ret && conf->radar_enabled)
1276 *ret = true;
1277 }
1278
1279 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1280 {
1281 bool has_radar = false;
1282
1283 ieee80211_iter_chan_contexts_atomic(ar->hw,
1284 ath10k_mac_has_radar_iter,
1285 &has_radar);
1286
1287 return has_radar;
1288 }
1289
1290 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1291 {
1292 int ret;
1293
1294 lockdep_assert_held(&ar->conf_mutex);
1295
1296 ath10k_stop_cac(ar);
1297
1298 if (!ath10k_mac_has_radar_enabled(ar))
1299 return;
1300
1301 if (ar->num_started_vdevs > 0)
1302 return;
1303
1304 ret = ath10k_start_cac(ar);
1305 if (ret) {
1306 /*
1307 * Not possible to start CAC on current channel so starting
1308 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1309 * by indicating that radar was detected.
1310 */
1311 ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1312 ieee80211_radar_detected(ar->hw);
1313 }
1314 }
1315
1316 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1317 {
1318 struct ath10k *ar = arvif->ar;
1319 int ret;
1320
1321 lockdep_assert_held(&ar->conf_mutex);
1322
1323 reinit_completion(&ar->vdev_setup_done);
1324
1325 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1326 if (ret) {
1327 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1328 arvif->vdev_id, ret);
1329 return ret;
1330 }
1331
1332 ret = ath10k_vdev_setup_sync(ar);
1333 if (ret) {
1334 ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
1335 arvif->vdev_id, ret);
1336 return ret;
1337 }
1338
1339 WARN_ON(ar->num_started_vdevs == 0);
1340
1341 if (ar->num_started_vdevs != 0) {
1342 ar->num_started_vdevs--;
1343 ath10k_recalc_radar_detection(ar);
1344 }
1345
1346 return ret;
1347 }
1348
1349 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1350 const struct cfg80211_chan_def *chandef,
1351 bool restart)
1352 {
1353 struct ath10k *ar = arvif->ar;
1354 struct wmi_vdev_start_request_arg arg = {};
1355 int ret = 0;
1356
1357 lockdep_assert_held(&ar->conf_mutex);
1358
1359 reinit_completion(&ar->vdev_setup_done);
1360
1361 arg.vdev_id = arvif->vdev_id;
1362 arg.dtim_period = arvif->dtim_period;
1363 arg.bcn_intval = arvif->beacon_interval;
1364
1365 arg.channel.freq = chandef->chan->center_freq;
1366 arg.channel.band_center_freq1 = chandef->center_freq1;
1367 arg.channel.mode = chan_to_phymode(chandef);
1368
1369 arg.channel.min_power = 0;
1370 arg.channel.max_power = chandef->chan->max_power * 2;
1371 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1372 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1373
1374 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1375 arg.ssid = arvif->u.ap.ssid;
1376 arg.ssid_len = arvif->u.ap.ssid_len;
1377 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1378
1379 /* For now allow DFS for AP mode */
1380 arg.channel.chan_radar =
1381 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1382 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1383 arg.ssid = arvif->vif->bss_conf.ssid;
1384 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1385 }
1386
1387 ath10k_dbg(ar, ATH10K_DBG_MAC,
1388 "mac vdev %d start center_freq %d phymode %s\n",
1389 arg.vdev_id, arg.channel.freq,
1390 ath10k_wmi_phymode_str(arg.channel.mode));
1391
1392 if (restart)
1393 ret = ath10k_wmi_vdev_restart(ar, &arg);
1394 else
1395 ret = ath10k_wmi_vdev_start(ar, &arg);
1396
1397 if (ret) {
1398 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1399 arg.vdev_id, ret);
1400 return ret;
1401 }
1402
1403 ret = ath10k_vdev_setup_sync(ar);
1404 if (ret) {
1405 ath10k_warn(ar,
1406 "failed to synchronize setup for vdev %i restart %d: %d\n",
1407 arg.vdev_id, restart, ret);
1408 return ret;
1409 }
1410
1411 ar->num_started_vdevs++;
1412 ath10k_recalc_radar_detection(ar);
1413
1414 return ret;
1415 }
1416
1417 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1418 const struct cfg80211_chan_def *def)
1419 {
1420 return ath10k_vdev_start_restart(arvif, def, false);
1421 }
1422
1423 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1424 const struct cfg80211_chan_def *def)
1425 {
1426 return ath10k_vdev_start_restart(arvif, def, true);
1427 }
1428
1429 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1430 struct sk_buff *bcn)
1431 {
1432 struct ath10k *ar = arvif->ar;
1433 struct ieee80211_mgmt *mgmt;
1434 const u8 *p2p_ie;
1435 int ret;
1436
1437 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1438 return 0;
1439
1440 mgmt = (void *)bcn->data;
1441 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1442 mgmt->u.beacon.variable,
1443 bcn->len - (mgmt->u.beacon.variable -
1444 bcn->data));
1445 if (!p2p_ie)
1446 return -ENOENT;
1447
1448 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1449 if (ret) {
1450 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1451 arvif->vdev_id, ret);
1452 return ret;
1453 }
1454
1455 return 0;
1456 }
1457
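/* Locate a vendor IE by OUI/type starting at ie_offset and strip it from the
 * skb by shifting the remaining data down and trimming the tail.
 */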
1458 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1459 u8 oui_type, size_t ie_offset)
1460 {
1461 size_t len;
1462 const u8 *next;
1463 const u8 *end;
1464 u8 *ie;
1465
1466 if (WARN_ON(skb->len < ie_offset))
1467 return -EINVAL;
1468
1469 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1470 skb->data + ie_offset,
1471 skb->len - ie_offset);
1472 if (!ie)
1473 return -ENOENT;
1474
1475 len = ie[1] + 2;
1476 end = skb->data + skb->len;
1477 next = ie + len;
1478
1479 if (WARN_ON(next > end))
1480 return -EINVAL;
1481
1482 memmove(ie, next, end - next);
1483 skb_trim(skb, skb->len - len);
1484
1485 return 0;
1486 }
1487
1488 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1489 {
1490 struct ath10k *ar = arvif->ar;
1491 struct ieee80211_hw *hw = ar->hw;
1492 struct ieee80211_vif *vif = arvif->vif;
1493 struct ieee80211_mutable_offsets offs = {};
1494 struct sk_buff *bcn;
1495 int ret;
1496
1497 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1498 return 0;
1499
1500 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1501 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1502 return 0;
1503
1504 bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1505 if (!bcn) {
1506 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1507 return -EPERM;
1508 }
1509
1510 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1511 if (ret) {
1512 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1513 kfree_skb(bcn);
1514 return ret;
1515 }
1516
1517 /* P2P IE is inserted by firmware automatically (as configured above)
1518 * so remove it from the base beacon template to avoid duplicate P2P
1519 * IEs in beacon frames.
1520 */
1521 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1522 offsetof(struct ieee80211_mgmt,
1523 u.beacon.variable));
1524
1525 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1526 0, NULL, 0);
1527 kfree_skb(bcn);
1528
1529 if (ret) {
1530 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1531 ret);
1532 return ret;
1533 }
1534
1535 return 0;
1536 }
1537
1538 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1539 {
1540 struct ath10k *ar = arvif->ar;
1541 struct ieee80211_hw *hw = ar->hw;
1542 struct ieee80211_vif *vif = arvif->vif;
1543 struct sk_buff *prb;
1544 int ret;
1545
1546 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1547 return 0;
1548
1549 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1550 return 0;
1551
1552 prb = ieee80211_proberesp_get(hw, vif);
1553 if (!prb) {
1554 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1555 return -EPERM;
1556 }
1557
1558 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1559 kfree_skb(prb);
1560
1561 if (ret) {
1562 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1563 ret);
1564 return ret;
1565 }
1566
1567 return 0;
1568 }
1569
1570 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1571 {
1572 struct ath10k *ar = arvif->ar;
1573 struct cfg80211_chan_def def;
1574 int ret;
1575
1576 /* When originally vdev is started during assign_vif_chanctx() some
1577 * information is missing, notably SSID. Firmware revisions with beacon
1578 * offloading require the SSID to be provided during vdev (re)start to
1579 * handle hidden SSID properly.
1580 *
1581 * Vdev restart must be done after vdev has been both started and
1582 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1583 * deliver vdev restart response event causing timeouts during vdev
1584 * syncing in ath10k.
1585 *
1586 * Note: The vdev down/up and template reinstallation could be skipped
1587 * since only wmi-tlv firmware is known to have beacon offload and
1588 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1589 * response delivery. It's probably more robust to keep it as is.
1590 */
1591 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1592 return 0;
1593
1594 if (WARN_ON(!arvif->is_started))
1595 return -EINVAL;
1596
1597 if (WARN_ON(!arvif->is_up))
1598 return -EINVAL;
1599
1600 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1601 return -EINVAL;
1602
1603 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1604 if (ret) {
1605 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1606 arvif->vdev_id, ret);
1607 return ret;
1608 }
1609
1610 /* Vdev down resets the beacon & presp templates. Reinstall them,
1611 * otherwise the firmware will crash upon vdev up.
1612 */
1613
1614 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1615 if (ret) {
1616 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1617 return ret;
1618 }
1619
1620 ret = ath10k_mac_setup_prb_tmpl(arvif);
1621 if (ret) {
1622 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1623 return ret;
1624 }
1625
1626 ret = ath10k_vdev_restart(arvif, &def);
1627 if (ret) {
1628 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1629 arvif->vdev_id, ret);
1630 return ret;
1631 }
1632
1633 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1634 arvif->bssid);
1635 if (ret) {
1636 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1637 arvif->vdev_id, ret);
1638 return ret;
1639 }
1640
1641 return 0;
1642 }
1643
1644 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1645 struct ieee80211_bss_conf *info)
1646 {
1647 struct ath10k *ar = arvif->ar;
1648 int ret = 0;
1649
1650 lockdep_assert_held(&arvif->ar->conf_mutex);
1651
1652 if (!info->enable_beacon) {
1653 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1654 if (ret)
1655 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1656 arvif->vdev_id, ret);
1657
1658 arvif->is_up = false;
1659
1660 spin_lock_bh(&arvif->ar->data_lock);
1661 ath10k_mac_vif_beacon_free(arvif);
1662 spin_unlock_bh(&arvif->ar->data_lock);
1663
1664 return;
1665 }
1666
1667 arvif->tx_seq_no = 0x1000;
1668
1669 arvif->aid = 0;
1670 ether_addr_copy(arvif->bssid, info->bssid);
1671
1672 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1673 arvif->bssid);
1674 if (ret) {
1675 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1676 arvif->vdev_id, ret);
1677 return;
1678 }
1679
1680 arvif->is_up = true;
1681
1682 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1683 if (ret) {
1684 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1685 arvif->vdev_id, ret);
1686 return;
1687 }
1688
1689 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1690 }
1691
1692 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1693 struct ieee80211_bss_conf *info,
1694 const u8 self_peer[ETH_ALEN])
1695 {
1696 struct ath10k *ar = arvif->ar;
1697 u32 vdev_param;
1698 int ret = 0;
1699
1700 lockdep_assert_held(&arvif->ar->conf_mutex);
1701
1702 if (!info->ibss_joined) {
1703 if (is_zero_ether_addr(arvif->bssid))
1704 return;
1705
1706 eth_zero_addr(arvif->bssid);
1707
1708 return;
1709 }
1710
1711 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1712 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1713 ATH10K_DEFAULT_ATIM);
1714 if (ret)
1715 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1716 arvif->vdev_id, ret);
1717 }
1718
1719 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1720 {
1721 struct ath10k *ar = arvif->ar;
1722 u32 param;
1723 u32 value;
1724 int ret;
1725
1726 lockdep_assert_held(&arvif->ar->conf_mutex);
1727
1728 if (arvif->u.sta.uapsd)
1729 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1730 else
1731 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1732
1733 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1734 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1735 if (ret) {
1736 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1737 value, arvif->vdev_id, ret);
1738 return ret;
1739 }
1740
1741 return 0;
1742 }
1743
1744 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1745 {
1746 struct ath10k *ar = arvif->ar;
1747 u32 param;
1748 u32 value;
1749 int ret;
1750
1751 lockdep_assert_held(&arvif->ar->conf_mutex);
1752
1753 if (arvif->u.sta.uapsd)
1754 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1755 else
1756 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1757
1758 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1759 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1760 param, value);
1761 if (ret) {
1762 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1763 value, arvif->vdev_id, ret);
1764 return ret;
1765 }
1766
1767 return 0;
1768 }
1769
1770 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1771 {
1772 struct ath10k_vif *arvif;
1773 int num = 0;
1774
1775 lockdep_assert_held(&ar->conf_mutex);
1776
1777 list_for_each_entry(arvif, &ar->arvifs, list)
1778 if (arvif->is_started)
1779 num++;
1780
1781 return num;
1782 }
1783
1784 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1785 {
1786 struct ath10k *ar = arvif->ar;
1787 struct ieee80211_vif *vif = arvif->vif;
1788 struct ieee80211_conf *conf = &ar->hw->conf;
1789 enum wmi_sta_powersave_param param;
1790 enum wmi_sta_ps_mode psmode;
1791 int ret;
1792 int ps_timeout;
1793 bool enable_ps;
1794
1795 lockdep_assert_held(&arvif->ar->conf_mutex);
1796
1797 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1798 return 0;
1799
1800 enable_ps = arvif->ps;
1801
1802 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1803 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1804 ar->running_fw->fw_file.fw_features)) {
1805 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1806 arvif->vdev_id);
1807 enable_ps = false;
1808 }
1809
1810 if (!arvif->is_started) {
1811 /* mac80211 can update vif powersave state while disconnected.
1812 * Firmware doesn't behave nicely and consumes more power than
1813 * necessary if PS is disabled on a non-started vdev. Hence
1814 * force-enable PS for non-running vdevs.
1815 */
1816 psmode = WMI_STA_PS_MODE_ENABLED;
1817 } else if (enable_ps) {
1818 psmode = WMI_STA_PS_MODE_ENABLED;
1819 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1820
1821 ps_timeout = conf->dynamic_ps_timeout;
1822 if (ps_timeout == 0) {
1823 /* Firmware doesn't like 0 */
1824 ps_timeout = ieee80211_tu_to_usec(
1825 vif->bss_conf.beacon_int) / 1000;
1826 }
1827
1828 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1829 ps_timeout);
1830 if (ret) {
1831 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1832 arvif->vdev_id, ret);
1833 return ret;
1834 }
1835 } else {
1836 psmode = WMI_STA_PS_MODE_DISABLED;
1837 }
1838
1839 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1840 arvif->vdev_id, psmode ? "enable" : "disable");
1841
1842 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1843 if (ret) {
1844 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1845 psmode, arvif->vdev_id, ret);
1846 return ret;
1847 }
1848
1849 return 0;
1850 }
1851
1852 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1853 {
1854 struct ath10k *ar = arvif->ar;
1855 struct wmi_sta_keepalive_arg arg = {};
1856 int ret;
1857
1858 lockdep_assert_held(&arvif->ar->conf_mutex);
1859
1860 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1861 return 0;
1862
1863 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1864 return 0;
1865
1866 /* Some firmware revisions have a bug and ignore the `enabled` field.
1867 * Instead use the interval to disable the keepalive.
1868 */
1869 arg.vdev_id = arvif->vdev_id;
1870 arg.enabled = 1;
1871 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1872 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1873
1874 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1875 if (ret) {
1876 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1877 arvif->vdev_id, ret);
1878 return ret;
1879 }
1880
1881 return 0;
1882 }
1883
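/* With beacon offload the host owns the templates, so during a channel
 * switch the CSA counter is decremented here and the beacon/probe response
 * templates are re-installed until the countdown completes, at which point
 * mac80211 is told to finish the switch.
 */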
1884 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1885 {
1886 struct ath10k *ar = arvif->ar;
1887 struct ieee80211_vif *vif = arvif->vif;
1888 int ret;
1889
1890 lockdep_assert_held(&arvif->ar->conf_mutex);
1891
1892 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1893 return;
1894
1895 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1896 return;
1897
1898 if (!vif->csa_active)
1899 return;
1900
1901 if (!arvif->is_up)
1902 return;
1903
1904 if (!ieee80211_csa_is_complete(vif)) {
1905 ieee80211_csa_update_counter(vif);
1906
1907 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1908 if (ret)
1909 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1910 ret);
1911
1912 ret = ath10k_mac_setup_prb_tmpl(arvif);
1913 if (ret)
1914 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1915 ret);
1916 } else {
1917 ieee80211_csa_finish(vif);
1918 }
1919 }
1920
1921 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1922 {
1923 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1924 ap_csa_work);
1925 struct ath10k *ar = arvif->ar;
1926
1927 mutex_lock(&ar->conf_mutex);
1928 ath10k_mac_vif_ap_csa_count_down(arvif);
1929 mutex_unlock(&ar->conf_mutex);
1930 }
1931
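/* A beacon received from the associated BSS cancels the pending connection
 * loss work scheduled by the beacon miss handler below.
 */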
1932 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1933 struct ieee80211_vif *vif)
1934 {
1935 struct sk_buff *skb = data;
1936 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1937 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1938
1939 if (vif->type != NL80211_IFTYPE_STATION)
1940 return;
1941
1942 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1943 return;
1944
1945 cancel_delayed_work(&arvif->connection_loss_work);
1946 }
1947
1948 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1949 {
1950 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1951 IEEE80211_IFACE_ITER_NORMAL,
1952 ath10k_mac_handle_beacon_iter,
1953 skb);
1954 }
1955
1956 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1957 struct ieee80211_vif *vif)
1958 {
1959 u32 *vdev_id = data;
1960 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1961 struct ath10k *ar = arvif->ar;
1962 struct ieee80211_hw *hw = ar->hw;
1963
1964 if (arvif->vdev_id != *vdev_id)
1965 return;
1966
1967 if (!arvif->is_up)
1968 return;
1969
1970 ieee80211_beacon_loss(vif);
1971
1972 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
1973 * (done by mac80211) succeeds but beacons do not resume then it
1974 * doesn't make sense to continue operation. Queue connection loss work
1975 * which can be cancelled when beacon is received.
1976 */
1977 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1978 ATH10K_CONNECTION_LOSS_HZ);
1979 }
1980
1981 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
1982 {
1983 ieee80211_iterate_active_interfaces_atomic(ar->hw,
1984 IEEE80211_IFACE_ITER_NORMAL,
1985 ath10k_mac_handle_beacon_miss_iter,
1986 &vdev_id);
1987 }
1988
1989 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
1990 {
1991 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1992 connection_loss_work.work);
1993 struct ieee80211_vif *vif = arvif->vif;
1994
1995 if (!arvif->is_up)
1996 return;
1997
1998 ieee80211_connection_loss(vif);
1999 }
2000
2001 /**********************/
2002 /* Station management */
2003 /**********************/
2004
2005 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2006 struct ieee80211_vif *vif)
2007 {
2008 /* Some firmware revisions have unstable STA powersave when listen
2009 * interval is set too high (e.g. 5). The symptoms are that the firmware doesn't
2010 * generate NullFunc frames properly even if buffered frames have been
2011 * indicated in Beacon TIM. Firmware would seldom wake up to pull
2012 * buffered frames. Often pinging the device from AP would simply fail.
2013 *
2014 * As a workaround set it to 1.
2015 */
2016 if (vif->type == NL80211_IFTYPE_STATION)
2017 return 1;
2018
2019 return ar->hw->conf.listen_interval;
2020 }
2021
2022 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2023 struct ieee80211_vif *vif,
2024 struct ieee80211_sta *sta,
2025 struct wmi_peer_assoc_complete_arg *arg)
2026 {
2027 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2028 u32 aid;
2029
2030 lockdep_assert_held(&ar->conf_mutex);
2031
2032 if (vif->type == NL80211_IFTYPE_STATION)
2033 aid = vif->bss_conf.aid;
2034 else
2035 aid = sta->aid;
2036
2037 ether_addr_copy(arg->addr, sta->addr);
2038 arg->vdev_id = arvif->vdev_id;
2039 arg->peer_aid = aid;
2040 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2041 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2042 arg->peer_num_spatial_streams = 1;
2043 arg->peer_caps = vif->bss_conf.assoc_capability;
2044 }
2045
2046 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2047 struct ieee80211_vif *vif,
2048 struct ieee80211_sta *sta,
2049 struct wmi_peer_assoc_complete_arg *arg)
2050 {
2051 struct ieee80211_bss_conf *info = &vif->bss_conf;
2052 struct cfg80211_chan_def def;
2053 struct cfg80211_bss *bss;
2054 const u8 *rsnie = NULL;
2055 const u8 *wpaie = NULL;
2056
2057 lockdep_assert_held(&ar->conf_mutex);
2058
2059 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2060 return;
2061
2062 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2063 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2064 if (bss) {
2065 const struct cfg80211_bss_ies *ies;
2066
2067 rcu_read_lock();
2068 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2069
2070 ies = rcu_dereference(bss->ies);
2071
2072 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2073 WLAN_OUI_TYPE_MICROSOFT_WPA,
2074 ies->data,
2075 ies->len);
2076 rcu_read_unlock();
2077 cfg80211_put_bss(ar->hw->wiphy, bss);
2078 }
2079
2080 /* FIXME: is keying off the RSN IE/WPA IE the correct approach? */
2081 if (rsnie || wpaie) {
2082 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2083 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2084 }
2085
2086 if (wpaie) {
2087 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2088 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2089 }
2090
2091 if (sta->mfp &&
2092 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2093 ar->running_fw->fw_file.fw_features)) {
2094 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2095 }
2096 }
2097
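/* Build the peer's legacy rate set by intersecting the station's supported
 * rates with the rates allowed by this vif's bitrate mask.
 */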
2098 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2099 struct ieee80211_vif *vif,
2100 struct ieee80211_sta *sta,
2101 struct wmi_peer_assoc_complete_arg *arg)
2102 {
2103 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2104 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2105 struct cfg80211_chan_def def;
2106 const struct ieee80211_supported_band *sband;
2107 const struct ieee80211_rate *rates;
2108 enum nl80211_band band;
2109 u32 ratemask;
2110 u8 rate;
2111 int i;
2112
2113 lockdep_assert_held(&ar->conf_mutex);
2114
2115 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2116 return;
2117
2118 band = def.chan->band;
2119 sband = ar->hw->wiphy->bands[band];
2120 ratemask = sta->supp_rates[band];
2121 ratemask &= arvif->bitrate_mask.control[band].legacy;
2122 rates = sband->bitrates;
2123
2124 rateset->num_rates = 0;
2125
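/* Walk the legacy ratemask bit by bit; every set bit maps to an entry
 * in the band's bitrate table and is converted via
 * ath10k_mac_bitrate_to_rate() into the rate encoding the firmware
 * expects.
 */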
2126 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2127 if (!(ratemask & 1))
2128 continue;
2129
2130 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2131 rateset->rates[rateset->num_rates] = rate;
2132 rateset->num_rates++;
2133 }
2134 }
2135
2136 static bool
2137 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2138 {
2139 int nss;
2140
2141 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2142 if (ht_mcs_mask[nss])
2143 return false;
2144
2145 return true;
2146 }
2147
2148 static bool
2149 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2150 {
2151 int nss;
2152
2153 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2154 if (vht_mcs_mask[nss])
2155 return false;
2156
2157 return true;
2158 }
2159
2160 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2161 struct ieee80211_vif *vif,
2162 struct ieee80211_sta *sta,
2163 struct wmi_peer_assoc_complete_arg *arg)
2164 {
2165 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2166 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2167 struct cfg80211_chan_def def;
2168 enum nl80211_band band;
2169 const u8 *ht_mcs_mask;
2170 const u16 *vht_mcs_mask;
2171 int i, n;
2172 u8 max_nss;
2173 u32 stbc;
2174
2175 lockdep_assert_held(&ar->conf_mutex);
2176
2177 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2178 return;
2179
2180 if (!ht_cap->ht_supported)
2181 return;
2182
2183 band = def.chan->band;
2184 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2185 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2186
2187 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2188 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2189 return;
2190
2191 arg->peer_flags |= ar->wmi.peer_flags->ht;
2192 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2193 ht_cap->ampdu_factor)) - 1;
2194
2195 arg->peer_mpdu_density =
2196 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2197
2198 arg->peer_ht_caps = ht_cap->cap;
2199 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2200
2201 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2202 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2203
2204 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2205 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2206 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2207 }
2208
2209 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2210 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2211 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2212
2213 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2214 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2215 }
2216
2217 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2218 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2219 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2220 }
2221
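/* Extract the two-bit RX STBC stream count from the HT capability
 * field and re-position it into the WMI rate-caps word.
 */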
2222 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2223 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2224 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2225 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2226 arg->peer_rate_caps |= stbc;
2227 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2228 }
2229
2230 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2231 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2232 else if (ht_cap->mcs.rx_mask[1])
2233 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2234
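/* Build the HT rate set from the intersection of the rates the peer
 * advertises (mcs.rx_mask) and the user-configured HT MCS mask, while
 * tracking the highest spatial stream count seen.
 */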
2235 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2236 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2237 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2238 max_nss = (i / 8) + 1;
2239 arg->peer_ht_rates.rates[n++] = i;
2240 }
2241
2242 /*
2243 * This is a workaround for HT-enabled STAs which break the spec
2244 * and have no HT capabilities RX mask (no HT RX MCS map).
2245 *
2246 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2247 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2248 *
2249 * Firmware asserts if such a situation occurs.
2250 */
2251 if (n == 0) {
2252 arg->peer_ht_rates.num_rates = 8;
2253 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2254 arg->peer_ht_rates.rates[i] = i;
2255 } else {
2256 arg->peer_ht_rates.num_rates = n;
2257 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2258 }
2259
2260 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2261 arg->addr,
2262 arg->peer_ht_rates.num_rates,
2263 arg->peer_num_spatial_streams);
2264 }
2265
2266 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2267 struct ath10k_vif *arvif,
2268 struct ieee80211_sta *sta)
2269 {
2270 u32 uapsd = 0;
2271 u32 max_sp = 0;
2272 int ret = 0;
2273
2274 lockdep_assert_held(&ar->conf_mutex);
2275
2276 if (sta->wme && sta->uapsd_queues) {
2277 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2278 sta->uapsd_queues, sta->max_sp);
2279
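/* Translate each U-APSD-enabled AC advertised by the station into the
 * matching WMI delivery- and trigger-enable bits (VO->AC3, VI->AC2,
 * BK->AC1, BE->AC0).
 */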
2280 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2281 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2282 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2283 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2284 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2285 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2286 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2287 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2288 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2289 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2290 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2291 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2292
2293 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2294 max_sp = sta->max_sp;
2295
2296 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2297 sta->addr,
2298 WMI_AP_PS_PEER_PARAM_UAPSD,
2299 uapsd);
2300 if (ret) {
2301 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2302 arvif->vdev_id, ret);
2303 return ret;
2304 }
2305
2306 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2307 sta->addr,
2308 WMI_AP_PS_PEER_PARAM_MAX_SP,
2309 max_sp);
2310 if (ret) {
2311 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2312 arvif->vdev_id, ret);
2313 return ret;
2314 }
2315
2316 /* TODO: set this up based on the STA listen interval and beacon
2317 * interval. Currently we don't know sta->listen_interval - a mac80211
2318 * patch is required. Use 10 seconds for now.
2319 */
2320 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2321 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2322 10);
2323 if (ret) {
2324 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2325 arvif->vdev_id, ret);
2326 return ret;
2327 }
2328 }
2329
2330 return 0;
2331 }
2332
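/* Clamp the peer's VHT TX MCS map against the user-configured per-NSS
 * MCS limits: for each spatial stream keep the highest commonly
 * supported MCS (0-7, 0-8 or 0-9) or mark the stream as not supported.
 */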
2333 static u16
2334 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2335 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2336 {
2337 int idx_limit;
2338 int nss;
2339 u16 mcs_map;
2340 u16 mcs;
2341
2342 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2343 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2344 vht_mcs_limit[nss];
2345
2346 if (mcs_map)
2347 idx_limit = fls(mcs_map) - 1;
2348 else
2349 idx_limit = -1;
2350
2351 switch (idx_limit) {
2352 case 0: /* fall through */
2353 case 1: /* fall through */
2354 case 2: /* fall through */
2355 case 3: /* fall through */
2356 case 4: /* fall through */
2357 case 5: /* fall through */
2358 case 6: /* fall through */
2359 default:
2360 /* see ath10k_mac_can_set_bitrate_mask() */
2361 WARN_ON(1);
2362 /* fall through */
2363 case -1:
2364 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2365 break;
2366 case 7:
2367 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2368 break;
2369 case 8:
2370 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2371 break;
2372 case 9:
2373 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2374 break;
2375 }
2376
2377 tx_mcs_set &= ~(0x3 << (nss * 2));
2378 tx_mcs_set |= mcs << (nss * 2);
2379 }
2380
2381 return tx_mcs_set;
2382 }
2383
2384 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2385 struct ieee80211_vif *vif,
2386 struct ieee80211_sta *sta,
2387 struct wmi_peer_assoc_complete_arg *arg)
2388 {
2389 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2390 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2391 struct cfg80211_chan_def def;
2392 enum nl80211_band band;
2393 const u16 *vht_mcs_mask;
2394 u8 ampdu_factor;
2395
2396 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2397 return;
2398
2399 if (!vht_cap->vht_supported)
2400 return;
2401
2402 band = def.chan->band;
2403 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2404
2405 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2406 return;
2407
2408 arg->peer_flags |= ar->wmi.peer_flags->vht;
2409
2410 if (def.chan->band == NL80211_BAND_2GHZ)
2411 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2412
2413 arg->peer_vht_caps = vht_cap->cap;
2414
2415 ampdu_factor = (vht_cap->cap &
2416 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2417 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2418
2419 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2420 * zero in VHT IE. Using it would result in degraded throughput.
2421 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2422 * it if VHT max_mpdu is smaller. */
2423 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2424 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2425 ampdu_factor)) - 1);
2426
2427 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2428 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2429
2430 arg->peer_vht_rates.rx_max_rate =
2431 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2432 arg->peer_vht_rates.rx_mcs_set =
2433 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2434 arg->peer_vht_rates.tx_max_rate =
2435 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2436 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2437 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2438
2439 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2440 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2441 }
2442
2443 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2444 struct ieee80211_vif *vif,
2445 struct ieee80211_sta *sta,
2446 struct wmi_peer_assoc_complete_arg *arg)
2447 {
2448 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2449
2450 switch (arvif->vdev_type) {
2451 case WMI_VDEV_TYPE_AP:
2452 if (sta->wme)
2453 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2454
2455 if (sta->wme && sta->uapsd_queues) {
2456 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2457 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2458 }
2459 break;
2460 case WMI_VDEV_TYPE_STA:
2461 if (vif->bss_conf.qos)
2462 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2463 break;
2464 case WMI_VDEV_TYPE_IBSS:
2465 if (sta->wme)
2466 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2467 break;
2468 default:
2469 break;
2470 }
2471
2472 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2473 sta->addr, !!(arg->peer_flags &
2474 arvif->ar->wmi.peer_flags->qos));
2475 }
2476
2477 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2478 {
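/* CCK rates occupy the lowest indices of the 2 GHz bitrate table, so
 * any bits left after shifting past them mean the station supports at
 * least one OFDM rate.
 */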
2479 return sta->supp_rates[NL80211_BAND_2GHZ] >>
2480 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2481 }
2482
2483 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2484 struct ieee80211_vif *vif,
2485 struct ieee80211_sta *sta,
2486 struct wmi_peer_assoc_complete_arg *arg)
2487 {
2488 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2489 struct cfg80211_chan_def def;
2490 enum nl80211_band band;
2491 const u8 *ht_mcs_mask;
2492 const u16 *vht_mcs_mask;
2493 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2494
2495 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2496 return;
2497
2498 band = def.chan->band;
2499 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2500 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2501
2502 switch (band) {
2503 case NL80211_BAND_2GHZ:
2504 if (sta->vht_cap.vht_supported &&
2505 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2506 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2507 phymode = MODE_11AC_VHT40;
2508 else
2509 phymode = MODE_11AC_VHT20;
2510 } else if (sta->ht_cap.ht_supported &&
2511 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2512 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2513 phymode = MODE_11NG_HT40;
2514 else
2515 phymode = MODE_11NG_HT20;
2516 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2517 phymode = MODE_11G;
2518 } else {
2519 phymode = MODE_11B;
2520 }
2521
2522 break;
2523 case NL80211_BAND_5GHZ:
2524 /*
2525 * Check VHT first.
2526 */
2527 if (sta->vht_cap.vht_supported &&
2528 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2529 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2530 phymode = MODE_11AC_VHT80;
2531 else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2532 phymode = MODE_11AC_VHT40;
2533 else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2534 phymode = MODE_11AC_VHT20;
2535 } else if (sta->ht_cap.ht_supported &&
2536 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2537 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2538 phymode = MODE_11NA_HT40;
2539 else
2540 phymode = MODE_11NA_HT20;
2541 } else {
2542 phymode = MODE_11A;
2543 }
2544
2545 break;
2546 default:
2547 break;
2548 }
2549
2550 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2551 sta->addr, ath10k_wmi_phymode_str(phymode));
2552
2553 arg->peer_phymode = phymode;
2554 WARN_ON(phymode == MODE_UNKNOWN);
2555 }
2556
2557 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2558 struct ieee80211_vif *vif,
2559 struct ieee80211_sta *sta,
2560 struct wmi_peer_assoc_complete_arg *arg)
2561 {
2562 lockdep_assert_held(&ar->conf_mutex);
2563
2564 memset(arg, 0, sizeof(*arg));
2565
2566 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2567 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2568 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2569 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2570 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2571 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2572 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2573
2574 return 0;
2575 }
2576
2577 static const u32 ath10k_smps_map[] = {
2578 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2579 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2580 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2581 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2582 };
2583
2584 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2585 const u8 *addr,
2586 const struct ieee80211_sta_ht_cap *ht_cap)
2587 {
2588 int smps;
2589
2590 if (!ht_cap->ht_supported)
2591 return 0;
2592
2593 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2594 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2595
2596 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2597 return -EINVAL;
2598
2599 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2600 WMI_PEER_SMPS_STATE,
2601 ath10k_smps_map[smps]);
2602 }
2603
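/* Recompute the vdev TXBF (beamforming) parameter from our own VHT
 * capabilities and those of the remote peer. Only relevant for
 * firmware that applies the TXBF configuration after association.
 */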
2604 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2605 struct ieee80211_vif *vif,
2606 struct ieee80211_sta_vht_cap vht_cap)
2607 {
2608 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2609 int ret;
2610 u32 param;
2611 u32 value;
2612
2613 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2614 return 0;
2615
2616 if (!(ar->vht_cap_info &
2617 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2618 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2619 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2620 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2621 return 0;
2622
2623 param = ar->wmi.vdev_param->txbf;
2624 value = 0;
2625
2626 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2627 return 0;
2628
2629 /* The following logic is correct: if a remote STA advertises support
2630 * for being a beamformer then we should enable beamformee support on our side.
2631 */
2632
2633 if (ar->vht_cap_info &
2634 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2635 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2636 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2637 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2638
2639 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2640 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2641 }
2642
2643 if (ar->vht_cap_info &
2644 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2645 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2646 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2647 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2648
2649 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2650 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2651 }
2652
2653 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2654 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2655
2656 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2657 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2658
2659 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2660 if (ret) {
2661 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2662 value, ret);
2663 return ret;
2664 }
2665
2666 return 0;
2667 }
2668
2669 /* can be called only in mac80211 callbacks due to `key_count` usage */
2670 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2671 struct ieee80211_vif *vif,
2672 struct ieee80211_bss_conf *bss_conf)
2673 {
2674 struct ath10k *ar = hw->priv;
2675 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2676 struct ieee80211_sta_ht_cap ht_cap;
2677 struct ieee80211_sta_vht_cap vht_cap;
2678 struct wmi_peer_assoc_complete_arg peer_arg;
2679 struct ieee80211_sta *ap_sta;
2680 int ret;
2681
2682 lockdep_assert_held(&ar->conf_mutex);
2683
2684 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2685 arvif->vdev_id, arvif->bssid, arvif->aid);
2686
2687 rcu_read_lock();
2688
2689 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2690 if (!ap_sta) {
2691 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2692 bss_conf->bssid, arvif->vdev_id);
2693 rcu_read_unlock();
2694 return;
2695 }
2696
2697 /* ap_sta must be accessed only within the RCU read-side section, which
2698 * must be left before calling ath10k_setup_peer_smps() as it might sleep. */
2699 ht_cap = ap_sta->ht_cap;
2700 vht_cap = ap_sta->vht_cap;
2701
2702 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2703 if (ret) {
2704 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2705 bss_conf->bssid, arvif->vdev_id, ret);
2706 rcu_read_unlock();
2707 return;
2708 }
2709
2710 rcu_read_unlock();
2711
2712 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2713 if (ret) {
2714 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2715 bss_conf->bssid, arvif->vdev_id, ret);
2716 return;
2717 }
2718
2719 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2720 if (ret) {
2721 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2722 arvif->vdev_id, ret);
2723 return;
2724 }
2725
2726 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2727 if (ret) {
2728 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2729 arvif->vdev_id, bss_conf->bssid, ret);
2730 return;
2731 }
2732
2733 ath10k_dbg(ar, ATH10K_DBG_MAC,
2734 "mac vdev %d up (associated) bssid %pM aid %d\n",
2735 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2736
2737 WARN_ON(arvif->is_up);
2738
2739 arvif->aid = bss_conf->aid;
2740 ether_addr_copy(arvif->bssid, bss_conf->bssid);
2741
2742 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2743 if (ret) {
2744 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2745 arvif->vdev_id, ret);
2746 return;
2747 }
2748
2749 arvif->is_up = true;
2750
2751 /* Workaround: Some firmware revisions (tested with qca6174
2752 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
2753 * poked with a peer param command.
2754 */
2755 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2756 WMI_PEER_DUMMY_VAR, 1);
2757 if (ret) {
2758 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2759 arvif->bssid, arvif->vdev_id, ret);
2760 return;
2761 }
2762 }
2763
2764 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2765 struct ieee80211_vif *vif)
2766 {
2767 struct ath10k *ar = hw->priv;
2768 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2769 struct ieee80211_sta_vht_cap vht_cap = {};
2770 int ret;
2771
2772 lockdep_assert_held(&ar->conf_mutex);
2773
2774 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2775 arvif->vdev_id, arvif->bssid);
2776
2777 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2778 if (ret)
2779 ath10k_warn(ar, "faield to down vdev %i: %d\n",
2780 arvif->vdev_id, ret);
2781
2782 arvif->def_wep_key_idx = -1;
2783
2784 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2785 if (ret) {
2786 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2787 arvif->vdev_id, ret);
2788 return;
2789 }
2790
2791 arvif->is_up = false;
2792
2793 cancel_delayed_work_sync(&arvif->connection_loss_work);
2794 }
2795
2796 static int ath10k_station_assoc(struct ath10k *ar,
2797 struct ieee80211_vif *vif,
2798 struct ieee80211_sta *sta,
2799 bool reassoc)
2800 {
2801 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2802 struct wmi_peer_assoc_complete_arg peer_arg;
2803 int ret = 0;
2804
2805 lockdep_assert_held(&ar->conf_mutex);
2806
2807 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2808 if (ret) {
2809 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2810 sta->addr, arvif->vdev_id, ret);
2811 return ret;
2812 }
2813
2814 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2815 if (ret) {
2816 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2817 sta->addr, arvif->vdev_id, ret);
2818 return ret;
2819 }
2820
2821 /* Re-assoc is run only to update the supported rates for a given
2822 * station. It doesn't make much sense to reconfigure the peer completely.
2823 */
2824 if (!reassoc) {
2825 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2826 &sta->ht_cap);
2827 if (ret) {
2828 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2829 arvif->vdev_id, ret);
2830 return ret;
2831 }
2832
2833 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2834 if (ret) {
2835 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2836 sta->addr, arvif->vdev_id, ret);
2837 return ret;
2838 }
2839
2840 if (!sta->wme) {
2841 arvif->num_legacy_stations++;
2842 ret = ath10k_recalc_rtscts_prot(arvif);
2843 if (ret) {
2844 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2845 arvif->vdev_id, ret);
2846 return ret;
2847 }
2848 }
2849
2850 /* Plumb cached keys only for static WEP */
2851 if (arvif->def_wep_key_idx != -1) {
2852 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2853 if (ret) {
2854 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2855 arvif->vdev_id, ret);
2856 return ret;
2857 }
2858 }
2859 }
2860
2861 return ret;
2862 }
2863
2864 static int ath10k_station_disassoc(struct ath10k *ar,
2865 struct ieee80211_vif *vif,
2866 struct ieee80211_sta *sta)
2867 {
2868 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2869 int ret = 0;
2870
2871 lockdep_assert_held(&ar->conf_mutex);
2872
2873 if (!sta->wme) {
2874 arvif->num_legacy_stations--;
2875 ret = ath10k_recalc_rtscts_prot(arvif);
2876 if (ret) {
2877 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2878 arvif->vdev_id, ret);
2879 return ret;
2880 }
2881 }
2882
2883 ret = ath10k_clear_peer_keys(arvif, sta->addr);
2884 if (ret) {
2885 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2886 arvif->vdev_id, ret);
2887 return ret;
2888 }
2889
2890 return ret;
2891 }
2892
2893 /**************/
2894 /* Regulatory */
2895 /**************/
2896
2897 static int ath10k_update_channel_list(struct ath10k *ar)
2898 {
2899 struct ieee80211_hw *hw = ar->hw;
2900 struct ieee80211_supported_band **bands;
2901 enum nl80211_band band;
2902 struct ieee80211_channel *channel;
2903 struct wmi_scan_chan_list_arg arg = {0};
2904 struct wmi_channel_arg *ch;
2905 bool passive;
2906 int len;
2907 int ret;
2908 int i;
2909
2910 lockdep_assert_held(&ar->conf_mutex);
2911
2912 bands = hw->wiphy->bands;
2913 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2914 if (!bands[band])
2915 continue;
2916
2917 for (i = 0; i < bands[band]->n_channels; i++) {
2918 if (bands[band]->channels[i].flags &
2919 IEEE80211_CHAN_DISABLED)
2920 continue;
2921
2922 arg.n_channels++;
2923 }
2924 }
2925
2926 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2927 arg.channels = kzalloc(len, GFP_KERNEL);
2928 if (!arg.channels)
2929 return -ENOMEM;
2930
2931 ch = arg.channels;
2932 for (band = 0; band < NUM_NL80211_BANDS; band++) {
2933 if (!bands[band])
2934 continue;
2935
2936 for (i = 0; i < bands[band]->n_channels; i++) {
2937 channel = &bands[band]->channels[i];
2938
2939 if (channel->flags & IEEE80211_CHAN_DISABLED)
2940 continue;
2941
2942 ch->allow_ht = true;
2943
2944 /* FIXME: when should we really allow VHT? */
2945 ch->allow_vht = true;
2946
2947 ch->allow_ibss =
2948 !(channel->flags & IEEE80211_CHAN_NO_IR);
2949
2950 ch->ht40plus =
2951 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2952
2953 ch->chan_radar =
2954 !!(channel->flags & IEEE80211_CHAN_RADAR);
2955
2956 passive = channel->flags & IEEE80211_CHAN_NO_IR;
2957 ch->passive = passive;
2958
2959 ch->freq = channel->center_freq;
2960 ch->band_center_freq1 = channel->center_freq;
2961 ch->min_power = 0;
2962 ch->max_power = channel->max_power * 2;
2963 ch->max_reg_power = channel->max_reg_power * 2;
2964 ch->max_antenna_gain = channel->max_antenna_gain * 2;
2965 ch->reg_class_id = 0; /* FIXME */
2966
2967 /* FIXME: why use only legacy modes, why not any
2968 * HT/VHT modes? Would that even make any
2969 * difference? */
2970 if (channel->band == NL80211_BAND_2GHZ)
2971 ch->mode = MODE_11G;
2972 else
2973 ch->mode = MODE_11A;
2974
2975 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2976 continue;
2977
2978 ath10k_dbg(ar, ATH10K_DBG_WMI,
2979 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2980 ch - arg.channels, arg.n_channels,
2981 ch->freq, ch->max_power, ch->max_reg_power,
2982 ch->max_antenna_gain, ch->mode);
2983
2984 ch++;
2985 }
2986 }
2987
2988 ret = ath10k_wmi_scan_chan_list(ar, &arg);
2989 kfree(arg.channels);
2990
2991 return ret;
2992 }
2993
2994 static enum wmi_dfs_region
2995 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
2996 {
2997 switch (dfs_region) {
2998 case NL80211_DFS_UNSET:
2999 return WMI_UNINIT_DFS_DOMAIN;
3000 case NL80211_DFS_FCC:
3001 return WMI_FCC_DFS_DOMAIN;
3002 case NL80211_DFS_ETSI:
3003 return WMI_ETSI_DFS_DOMAIN;
3004 case NL80211_DFS_JP:
3005 return WMI_MKK4_DFS_DOMAIN;
3006 }
3007 return WMI_UNINIT_DFS_DOMAIN;
3008 }
3009
3010 static void ath10k_regd_update(struct ath10k *ar)
3011 {
3012 struct reg_dmn_pair_mapping *regpair;
3013 int ret;
3014 enum wmi_dfs_region wmi_dfs_reg;
3015 enum nl80211_dfs_regions nl_dfs_reg;
3016
3017 lockdep_assert_held(&ar->conf_mutex);
3018
3019 ret = ath10k_update_channel_list(ar);
3020 if (ret)
3021 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3022
3023 regpair = ar->ath_common.regulatory.regpair;
3024
3025 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3026 nl_dfs_reg = ar->dfs_detector->region;
3027 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3028 } else {
3029 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3030 }
3031
3032 /* The target allows setting up a per-band regdomain, but ath_common
3033 * provides only a combined one. */
3034 ret = ath10k_wmi_pdev_set_regdomain(ar,
3035 regpair->reg_domain,
3036 regpair->reg_domain, /* 2ghz */
3037 regpair->reg_domain, /* 5ghz */
3038 regpair->reg_2ghz_ctl,
3039 regpair->reg_5ghz_ctl,
3040 wmi_dfs_reg);
3041 if (ret)
3042 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3043 }
3044
3045 static void ath10k_reg_notifier(struct wiphy *wiphy,
3046 struct regulatory_request *request)
3047 {
3048 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3049 struct ath10k *ar = hw->priv;
3050 bool result;
3051
3052 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3053
3054 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3055 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3056 request->dfs_region);
3057 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3058 request->dfs_region);
3059 if (!result)
3060 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3061 request->dfs_region);
3062 }
3063
3064 mutex_lock(&ar->conf_mutex);
3065 if (ar->state == ATH10K_STATE_ON)
3066 ath10k_regd_update(ar);
3067 mutex_unlock(&ar->conf_mutex);
3068 }
3069
3070 /***************/
3071 /* TX handlers */
3072 /***************/
3073
3074 enum ath10k_mac_tx_path {
3075 ATH10K_MAC_TX_HTT,
3076 ATH10K_MAC_TX_HTT_MGMT,
3077 ATH10K_MAC_TX_WMI_MGMT,
3078 ATH10K_MAC_TX_UNKNOWN,
3079 };
3080
3081 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3082 {
3083 lockdep_assert_held(&ar->htt.tx_lock);
3084
3085 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3086 ar->tx_paused |= BIT(reason);
3087 ieee80211_stop_queues(ar->hw);
3088 }
3089
3090 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3091 struct ieee80211_vif *vif)
3092 {
3093 struct ath10k *ar = data;
3094 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3095
3096 if (arvif->tx_paused)
3097 return;
3098
3099 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3100 }
3101
3102 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3103 {
3104 lockdep_assert_held(&ar->htt.tx_lock);
3105
3106 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3107 ar->tx_paused &= ~BIT(reason);
3108
3109 if (ar->tx_paused)
3110 return;
3111
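/* No global pause reasons remain - wake every vif queue that is not
 * individually paused, plus the offchannel tx queue.
 */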
3112 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3113 IEEE80211_IFACE_ITER_RESUME_ALL,
3114 ath10k_mac_tx_unlock_iter,
3115 ar);
3116
3117 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3118 }
3119
3120 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3121 {
3122 struct ath10k *ar = arvif->ar;
3123
3124 lockdep_assert_held(&ar->htt.tx_lock);
3125
3126 WARN_ON(reason >= BITS_PER_LONG);
3127 arvif->tx_paused |= BIT(reason);
3128 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3129 }
3130
3131 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3132 {
3133 struct ath10k *ar = arvif->ar;
3134
3135 lockdep_assert_held(&ar->htt.tx_lock);
3136
3137 WARN_ON(reason >= BITS_PER_LONG);
3138 arvif->tx_paused &= ~BIT(reason);
3139
3140 if (ar->tx_paused)
3141 return;
3142
3143 if (arvif->tx_paused)
3144 return;
3145
3146 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3147 }
3148
3149 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3150 enum wmi_tlv_tx_pause_id pause_id,
3151 enum wmi_tlv_tx_pause_action action)
3152 {
3153 struct ath10k *ar = arvif->ar;
3154
3155 lockdep_assert_held(&ar->htt.tx_lock);
3156
3157 switch (action) {
3158 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3159 ath10k_mac_vif_tx_lock(arvif, pause_id);
3160 break;
3161 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3162 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3163 break;
3164 default:
3165 ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3166 action, arvif->vdev_id);
3167 break;
3168 }
3169 }
3170
3171 struct ath10k_mac_tx_pause {
3172 u32 vdev_id;
3173 enum wmi_tlv_tx_pause_id pause_id;
3174 enum wmi_tlv_tx_pause_action action;
3175 };
3176
3177 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3178 struct ieee80211_vif *vif)
3179 {
3180 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3181 struct ath10k_mac_tx_pause *arg = data;
3182
3183 if (arvif->vdev_id != arg->vdev_id)
3184 return;
3185
3186 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3187 }
3188
3189 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3190 enum wmi_tlv_tx_pause_id pause_id,
3191 enum wmi_tlv_tx_pause_action action)
3192 {
3193 struct ath10k_mac_tx_pause arg = {
3194 .vdev_id = vdev_id,
3195 .pause_id = pause_id,
3196 .action = action,
3197 };
3198
3199 spin_lock_bh(&ar->htt.tx_lock);
3200 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3201 IEEE80211_IFACE_ITER_RESUME_ALL,
3202 ath10k_mac_handle_tx_pause_iter,
3203 &arg);
3204 spin_unlock_bh(&ar->htt.tx_lock);
3205 }
3206
3207 static enum ath10k_hw_txrx_mode
3208 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3209 struct ieee80211_vif *vif,
3210 struct ieee80211_sta *sta,
3211 struct sk_buff *skb)
3212 {
3213 const struct ieee80211_hdr *hdr = (void *)skb->data;
3214 __le16 fc = hdr->frame_control;
3215
3216 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3217 return ATH10K_HW_TXRX_RAW;
3218
3219 if (ieee80211_is_mgmt(fc))
3220 return ATH10K_HW_TXRX_MGMT;
3221
3222 /* Workaround:
3223 *
3224 * NullFunc frames are mostly used to check whether a client or AP is
3225 * still reachable and responsive. This implies tx status reports must
3226 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
3227 * can conclude that the other end disappeared and tear down the BSS
3228 * connection, or it may never disconnect from the BSS/client (which is
3229 * what happens in practice).
3230 *
3231 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3232 * NullFunc frames to the driver. However, there is an HTT Mgmt Tx
3233 * command which seems to deliver correct tx reports for NullFunc
3234 * frames. The downside of using it is that it ignores client powersave
3235 * state, so it can end up disconnecting sleeping clients in AP mode.
3236 * It should fix STA mode though, because APs don't sleep.
3237 */
3238 if (ar->htt.target_version_major < 3 &&
3239 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3240 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3241 ar->running_fw->fw_file.fw_features))
3242 return ATH10K_HW_TXRX_MGMT;
3243
3244 /* Workaround:
3245 *
3246 * Some wmi-tlv firmware revisions for qca6174 have broken Tx key
3247 * selection for NativeWifi txmode - they select the AP key instead of
3248 * the peer key. Ethernet txmode seems to work, so use it.
3249 *
3250 * FIXME: Check if raw mode works with TDLS.
3251 */
3252 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3253 return ATH10K_HW_TXRX_ETHERNET;
3254
3255 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3256 return ATH10K_HW_TXRX_RAW;
3257
3258 return ATH10K_HW_TXRX_NATIVE_WIFI;
3259 }
3260
3261 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3262 struct sk_buff *skb)
3263 {
3264 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3265 const struct ieee80211_hdr *hdr = (void *)skb->data;
3266 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3267 IEEE80211_TX_CTL_INJECTED;
3268
3269 if (!ieee80211_has_protected(hdr->frame_control))
3270 return false;
3271
3272 if ((info->flags & mask) == mask)
3273 return false;
3274
3275 if (vif)
3276 return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3277
3278 return true;
3279 }
3280
3281 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3282 * Control in the header.
3283 */
3284 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3285 {
3286 struct ieee80211_hdr *hdr = (void *)skb->data;
3287 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3288 u8 *qos_ctl;
3289
3290 if (!ieee80211_is_data_qos(hdr->frame_control))
3291 return;
3292
3293 qos_ctl = ieee80211_get_qos_ctl(hdr);
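/* Slide the 802.11 header forward by QOS_CTL_LEN bytes so that it
 * overwrites the QoS Control field, then trim the duplicated bytes
 * from the head of the skb.
 */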
3294 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3295 skb->data, (void *)qos_ctl - (void *)skb->data);
3296 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3297
3298 /* Some firmware revisions don't handle sending QoS NullFunc well.
3299 * These frames are mainly used for CQM purposes so it doesn't really
3300 * matter whether QoS NullFunc or plain NullFunc is sent.
3301 */
3302 hdr = (void *)skb->data;
3303 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3304 cb->flags &= ~ATH10K_SKB_F_QOS;
3305
3306 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3307 }
3308
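/* Convert an 802.11 data frame carrying an RFC 1042 (LLC/SNAP) header
 * into an 802.3 Ethernet frame, as expected by the Ethernet tx mode.
 */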
3309 static void ath10k_tx_h_8023(struct sk_buff *skb)
3310 {
3311 struct ieee80211_hdr *hdr;
3312 struct rfc1042_hdr *rfc1042;
3313 struct ethhdr *eth;
3314 size_t hdrlen;
3315 u8 da[ETH_ALEN];
3316 u8 sa[ETH_ALEN];
3317 __be16 type;
3318
3319 hdr = (void *)skb->data;
3320 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3321 rfc1042 = (void *)skb->data + hdrlen;
3322
3323 ether_addr_copy(da, ieee80211_get_DA(hdr));
3324 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3325 type = rfc1042->snap_type;
3326
3327 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3328 skb_push(skb, sizeof(*eth));
3329
3330 eth = (void *)skb->data;
3331 ether_addr_copy(eth->h_dest, da);
3332 ether_addr_copy(eth->h_source, sa);
3333 eth->h_proto = type;
3334 }
3335
3336 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3337 struct ieee80211_vif *vif,
3338 struct sk_buff *skb)
3339 {
3340 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3341 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3342
3343 /* This case applies only to P2P GO */
3344 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3345 return;
3346
3347 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3348 spin_lock_bh(&ar->data_lock);
3349 if (arvif->u.ap.noa_data)
3350 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3351 GFP_ATOMIC))
3352 memcpy(skb_put(skb, arvif->u.ap.noa_len),
3353 arvif->u.ap.noa_data,
3354 arvif->u.ap.noa_len);
3355 spin_unlock_bh(&ar->data_lock);
3356 }
3357 }
3358
3359 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3360 struct ieee80211_vif *vif,
3361 struct ieee80211_txq *txq,
3362 struct sk_buff *skb)
3363 {
3364 struct ieee80211_hdr *hdr = (void *)skb->data;
3365 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3366
3367 cb->flags = 0;
3368 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3369 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3370
3371 if (ieee80211_is_mgmt(hdr->frame_control))
3372 cb->flags |= ATH10K_SKB_F_MGMT;
3373
3374 if (ieee80211_is_data_qos(hdr->frame_control))
3375 cb->flags |= ATH10K_SKB_F_QOS;
3376
3377 cb->vif = vif;
3378 cb->txq = txq;
3379 }
3380
3381 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3382 {
3383 /* FIXME: Not really sure since when the behaviour changed. At some
3384 * point new firmware stopped requiring creation of peer entries for
3385 * offchannel tx (and actually creating them causes issues with wmi-htc
3386 * tx credit replenishment and reliability). Assuming it's at least 3.4
3387 * because that's when the `freq` was introduced to TX_FRM HTT command.
3388 */
3389 return (ar->htt.target_version_major >= 3 &&
3390 ar->htt.target_version_minor >= 4 &&
3391 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3392 }
3393
3394 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3395 {
3396 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3397 int ret = 0;
3398
3399 spin_lock_bh(&ar->data_lock);
3400
3401 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3402 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3403 ret = -ENOSPC;
3404 goto unlock;
3405 }
3406
3407 __skb_queue_tail(q, skb);
3408 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3409
3410 unlock:
3411 spin_unlock_bh(&ar->data_lock);
3412
3413 return ret;
3414 }
3415
3416 static enum ath10k_mac_tx_path
3417 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3418 struct sk_buff *skb,
3419 enum ath10k_hw_txrx_mode txmode)
3420 {
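/* Management frames go over WMI when the firmware advertises WMI mgmt
 * tx support, over the regular HTT data path on HTT >= 3.0, and over
 * the dedicated HTT mgmt tx command otherwise.
 */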
3421 switch (txmode) {
3422 case ATH10K_HW_TXRX_RAW:
3423 case ATH10K_HW_TXRX_NATIVE_WIFI:
3424 case ATH10K_HW_TXRX_ETHERNET:
3425 return ATH10K_MAC_TX_HTT;
3426 case ATH10K_HW_TXRX_MGMT:
3427 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3428 ar->running_fw->fw_file.fw_features))
3429 return ATH10K_MAC_TX_WMI_MGMT;
3430 else if (ar->htt.target_version_major >= 3)
3431 return ATH10K_MAC_TX_HTT;
3432 else
3433 return ATH10K_MAC_TX_HTT_MGMT;
3434 }
3435
3436 return ATH10K_MAC_TX_UNKNOWN;
3437 }
3438
3439 static int ath10k_mac_tx_submit(struct ath10k *ar,
3440 enum ath10k_hw_txrx_mode txmode,
3441 enum ath10k_mac_tx_path txpath,
3442 struct sk_buff *skb)
3443 {
3444 struct ath10k_htt *htt = &ar->htt;
3445 int ret = -EINVAL;
3446
3447 switch (txpath) {
3448 case ATH10K_MAC_TX_HTT:
3449 ret = ath10k_htt_tx(htt, txmode, skb);
3450 break;
3451 case ATH10K_MAC_TX_HTT_MGMT:
3452 ret = ath10k_htt_mgmt_tx(htt, skb);
3453 break;
3454 case ATH10K_MAC_TX_WMI_MGMT:
3455 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3456 break;
3457 case ATH10K_MAC_TX_UNKNOWN:
3458 WARN_ON_ONCE(1);
3459 ret = -EINVAL;
3460 break;
3461 }
3462
3463 if (ret) {
3464 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3465 ret);
3466 ieee80211_free_txskb(ar->hw, skb);
3467 }
3468
3469 return ret;
3470 }
3471
3472 /* This function consumes the sk_buff regardless of return value as far as
3473 * caller is concerned so no freeing is necessary afterwards.
3474 */
3475 static int ath10k_mac_tx(struct ath10k *ar,
3476 struct ieee80211_vif *vif,
3477 struct ieee80211_sta *sta,
3478 enum ath10k_hw_txrx_mode txmode,
3479 enum ath10k_mac_tx_path txpath,
3480 struct sk_buff *skb)
3481 {
3482 struct ieee80211_hw *hw = ar->hw;
3483 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3484 int ret;
3485
3486 /* CCK rates should not be used for P2P-related frames */
3487 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3488 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3489
3490 switch (txmode) {
3491 case ATH10K_HW_TXRX_MGMT:
3492 case ATH10K_HW_TXRX_NATIVE_WIFI:
3493 ath10k_tx_h_nwifi(hw, skb);
3494 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3495 ath10k_tx_h_seq_no(vif, skb);
3496 break;
3497 case ATH10K_HW_TXRX_ETHERNET:
3498 ath10k_tx_h_8023(skb);
3499 break;
3500 case ATH10K_HW_TXRX_RAW:
3501 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3502 WARN_ON_ONCE(1);
3503 ieee80211_free_txskb(hw, skb);
3504 return -ENOTSUPP;
3505 }
3506 }
3507
3508 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3509 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3510 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
3511 skb);
3512
3513 skb_queue_tail(&ar->offchan_tx_queue, skb);
3514 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3515 return 0;
3516 }
3517 }
3518
3519 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3520 if (ret) {
3521 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3522 return ret;
3523 }
3524
3525 return 0;
3526 }
3527
3528 void ath10k_offchan_tx_purge(struct ath10k *ar)
3529 {
3530 struct sk_buff *skb;
3531
3532 for (;;) {
3533 skb = skb_dequeue(&ar->offchan_tx_queue);
3534 if (!skb)
3535 break;
3536
3537 ieee80211_free_txskb(ar->hw, skb);
3538 }
3539 }
3540
3541 void ath10k_offchan_tx_work(struct work_struct *work)
3542 {
3543 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3544 struct ath10k_peer *peer;
3545 struct ath10k_vif *arvif;
3546 enum ath10k_hw_txrx_mode txmode;
3547 enum ath10k_mac_tx_path txpath;
3548 struct ieee80211_hdr *hdr;
3549 struct ieee80211_vif *vif;
3550 struct ieee80211_sta *sta;
3551 struct sk_buff *skb;
3552 const u8 *peer_addr;
3553 int vdev_id;
3554 int ret;
3555 unsigned long time_left;
3556 bool tmp_peer_created = false;
3557
3558 /* FW requirement: We must create a peer before FW will send out
3559 * an offchannel frame. Otherwise the frame will be stuck and
3560 * never transmitted. We delete the peer upon tx completion.
3561 * It is unlikely that a peer for offchannel tx will already be
3562 * present. However it may be in some rare cases so account for that.
3563 * Otherwise we might remove a legitimate peer and break stuff. */
3564
3565 for (;;) {
3566 skb = skb_dequeue(&ar->offchan_tx_queue);
3567 if (!skb)
3568 break;
3569
3570 mutex_lock(&ar->conf_mutex);
3571
3572 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
3573 skb);
3574
3575 hdr = (struct ieee80211_hdr *)skb->data;
3576 peer_addr = ieee80211_get_DA(hdr);
3577
3578 spin_lock_bh(&ar->data_lock);
3579 vdev_id = ar->scan.vdev_id;
3580 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3581 spin_unlock_bh(&ar->data_lock);
3582
3583 if (peer)
3584 /* FIXME: should this use ath10k_warn()? */
3585 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3586 peer_addr, vdev_id);
3587
3588 if (!peer) {
3589 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3590 peer_addr,
3591 WMI_PEER_TYPE_DEFAULT);
3592 if (ret)
3593 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3594 peer_addr, vdev_id, ret);
3595 tmp_peer_created = (ret == 0);
3596 }
3597
3598 spin_lock_bh(&ar->data_lock);
3599 reinit_completion(&ar->offchan_tx_completed);
3600 ar->offchan_tx_skb = skb;
3601 spin_unlock_bh(&ar->data_lock);
3602
3603 /* It's safe to access vif and sta - conf_mutex guarantees that
3604 * sta_state() and remove_interface() cannot run concurrently
3605 * with this offchannel worker.
3606 */
3607 arvif = ath10k_get_arvif(ar, vdev_id);
3608 if (arvif) {
3609 vif = arvif->vif;
3610 sta = ieee80211_find_sta(vif, peer_addr);
3611 } else {
3612 vif = NULL;
3613 sta = NULL;
3614 }
3615
3616 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3617 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3618
3619 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3620 if (ret) {
3621 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3622 ret);
3623 /* not serious */
3624 }
3625
3626 time_left =
3627 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3628 if (time_left == 0)
3629 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
3630 skb);
3631
3632 if (!peer && tmp_peer_created) {
3633 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3634 if (ret)
3635 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3636 peer_addr, vdev_id, ret);
3637 }
3638
3639 mutex_unlock(&ar->conf_mutex);
3640 }
3641 }
3642
3643 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3644 {
3645 struct sk_buff *skb;
3646
3647 for (;;) {
3648 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3649 if (!skb)
3650 break;
3651
3652 ieee80211_free_txskb(ar->hw, skb);
3653 }
3654 }
3655
3656 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3657 {
3658 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3659 struct sk_buff *skb;
3660 int ret;
3661
3662 for (;;) {
3663 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3664 if (!skb)
3665 break;
3666
3667 ret = ath10k_wmi_mgmt_tx(ar, skb);
3668 if (ret) {
3669 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3670 ret);
3671 ieee80211_free_txskb(ar->hw, skb);
3672 }
3673 }
3674 }
3675
3676 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3677 {
3678 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3679
3680 if (!txq)
3681 return;
3682
3683 INIT_LIST_HEAD(&artxq->list);
3684 }
3685
3686 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3687 {
3688 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3689 struct ath10k_skb_cb *cb;
3690 struct sk_buff *msdu;
3691 int msdu_id;
3692
3693 if (!txq)
3694 return;
3695
3696 spin_lock_bh(&ar->txqs_lock);
3697 if (!list_empty(&artxq->list))
3698 list_del_init(&artxq->list);
3699 spin_unlock_bh(&ar->txqs_lock);
3700
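/* Drop any references to this txq from frames still pending in HTT so
 * that later tx completions don't dereference a stale pointer.
 */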
3701 spin_lock_bh(&ar->htt.tx_lock);
3702 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3703 cb = ATH10K_SKB_CB(msdu);
3704 if (cb->txq == txq)
3705 cb->txq = NULL;
3706 }
3707 spin_unlock_bh(&ar->htt.tx_lock);
3708 }
3709
3710 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3711 u16 peer_id,
3712 u8 tid)
3713 {
3714 struct ath10k_peer *peer;
3715
3716 lockdep_assert_held(&ar->data_lock);
3717
3718 peer = ar->peer_map[peer_id];
3719 if (!peer)
3720 return NULL;
3721
3722 if (peer->sta)
3723 return peer->sta->txq[tid];
3724 else if (peer->vif)
3725 return peer->vif->txq;
3726 else
3727 return NULL;
3728 }
3729
3730 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3731 struct ieee80211_txq *txq)
3732 {
3733 struct ath10k *ar = hw->priv;
3734 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3735
3736 /* No need to get locks */
3737
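/* Pushing is allowed when the firmware is in push mode, or while either
 * the global or the per-queue number of in-flight frames is still below
 * its push limit.
 */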
3738 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3739 return true;
3740
3741 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3742 return true;
3743
3744 if (artxq->num_fw_queued < artxq->num_push_allowed)
3745 return true;
3746
3747 return false;
3748 }
3749
3750 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3751 struct ieee80211_txq *txq)
3752 {
3753 struct ath10k *ar = hw->priv;
3754 struct ath10k_htt *htt = &ar->htt;
3755 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3756 struct ieee80211_vif *vif = txq->vif;
3757 struct ieee80211_sta *sta = txq->sta;
3758 enum ath10k_hw_txrx_mode txmode;
3759 enum ath10k_mac_tx_path txpath;
3760 struct sk_buff *skb;
3761 size_t skb_len;
3762 int ret;
3763
3764 spin_lock_bh(&ar->htt.tx_lock);
3765 ret = ath10k_htt_tx_inc_pending(htt);
3766 spin_unlock_bh(&ar->htt.tx_lock);
3767
3768 if (ret)
3769 return ret;
3770
3771 skb = ieee80211_tx_dequeue(hw, txq);
3772 if (!skb) {
3773 spin_lock_bh(&ar->htt.tx_lock);
3774 ath10k_htt_tx_dec_pending(htt);
3775 spin_unlock_bh(&ar->htt.tx_lock);
3776
3777 return -ENOENT;
3778 }
3779
3780 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3781
3782 skb_len = skb->len;
3783 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3784 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3785
3786 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3787 if (unlikely(ret)) {
3788 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3789
3790 spin_lock_bh(&ar->htt.tx_lock);
3791 ath10k_htt_tx_dec_pending(htt);
3792 spin_unlock_bh(&ar->htt.tx_lock);
3793
3794 return ret;
3795 }
3796
3797 spin_lock_bh(&ar->htt.tx_lock);
3798 artxq->num_fw_queued++;
3799 spin_unlock_bh(&ar->htt.tx_lock);
3800
3801 return skb_len;
3802 }
3803
3804 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3805 {
3806 struct ieee80211_hw *hw = ar->hw;
3807 struct ieee80211_txq *txq;
3808 struct ath10k_txq *artxq;
3809 struct ath10k_txq *last;
3810 int ret;
3811 int max;
3812
3813 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3814 return;
3815
3816 spin_lock_bh(&ar->txqs_lock);
3817 rcu_read_lock();
3818
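/* Round-robin over the pending txqs: push up to 16 frames from each
 * queue, re-queue it at the tail unless it ran out of frames, and stop
 * after servicing the queue that was last when the pass started or on
 * a hard push error.
 */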
3819 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3820 while (!list_empty(&ar->txqs)) {
3821 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3822 txq = container_of((void *)artxq, struct ieee80211_txq,
3823 drv_priv);
3824
3825 /* Prevent an aggressive sta/tid from taking over the tx queue */
3826 max = 16;
3827 ret = 0;
3828 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3829 ret = ath10k_mac_tx_push_txq(hw, txq);
3830 if (ret < 0)
3831 break;
3832 }
3833
3834 list_del_init(&artxq->list);
3835 if (ret != -ENOENT)
3836 list_add_tail(&artxq->list, &ar->txqs);
3837
3838 ath10k_htt_tx_txq_update(hw, txq);
3839
3840 if (artxq == last || (ret < 0 && ret != -ENOENT))
3841 break;
3842 }
3843
3844 rcu_read_unlock();
3845 spin_unlock_bh(&ar->txqs_lock);
3846 }
3847
3848 /************/
3849 /* Scanning */
3850 /************/
3851
3852 void __ath10k_scan_finish(struct ath10k *ar)
3853 {
3854 lockdep_assert_held(&ar->data_lock);
3855
3856 switch (ar->scan.state) {
3857 case ATH10K_SCAN_IDLE:
3858 break;
3859 case ATH10K_SCAN_RUNNING:
3860 case ATH10K_SCAN_ABORTING:
3861 if (!ar->scan.is_roc) {
3862 struct cfg80211_scan_info info = {
3863 .aborted = (ar->scan.state ==
3864 ATH10K_SCAN_ABORTING),
3865 };
3866
3867 ieee80211_scan_completed(ar->hw, &info);
3868 } else if (ar->scan.roc_notify) {
3869 ieee80211_remain_on_channel_expired(ar->hw);
3870 }
3871 /* fall through */
3872 case ATH10K_SCAN_STARTING:
3873 ar->scan.state = ATH10K_SCAN_IDLE;
3874 ar->scan_channel = NULL;
3875 ar->scan.roc_freq = 0;
3876 ath10k_offchan_tx_purge(ar);
3877 cancel_delayed_work(&ar->scan.timeout);
3878 complete_all(&ar->scan.completed);
3879 break;
3880 }
3881 }
3882
3883 void ath10k_scan_finish(struct ath10k *ar)
3884 {
3885 spin_lock_bh(&ar->data_lock);
3886 __ath10k_scan_finish(ar);
3887 spin_unlock_bh(&ar->data_lock);
3888 }
3889
3890 static int ath10k_scan_stop(struct ath10k *ar)
3891 {
3892 struct wmi_stop_scan_arg arg = {
3893 .req_id = 1, /* FIXME */
3894 .req_type = WMI_SCAN_STOP_ONE,
3895 .u.scan_id = ATH10K_SCAN_ID,
3896 };
3897 int ret;
3898
3899 lockdep_assert_held(&ar->conf_mutex);
3900
3901 ret = ath10k_wmi_stop_scan(ar, &arg);
3902 if (ret) {
3903 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3904 goto out;
3905 }
3906
3907 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3908 if (ret == 0) {
3909 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3910 ret = -ETIMEDOUT;
3911 } else if (ret > 0) {
3912 ret = 0;
3913 }
3914
3915 out:
3916 /* Scan state should be updated upon scan completion, but in case
3917 * firmware fails to deliver the event (for whatever reason) it is
3918 * desirable to clean up the scan state anyway. Firmware may have
3919 * simply dropped the scan completion event because the transport pipe
3920 * was overflowing with data, and/or it can recover on its own before
3921 * the next scan request is submitted.
3922 */
3923 spin_lock_bh(&ar->data_lock);
3924 if (ar->scan.state != ATH10K_SCAN_IDLE)
3925 __ath10k_scan_finish(ar);
3926 spin_unlock_bh(&ar->data_lock);
3927
3928 return ret;
3929 }
3930
3931 static void ath10k_scan_abort(struct ath10k *ar)
3932 {
3933 int ret;
3934
3935 lockdep_assert_held(&ar->conf_mutex);
3936
3937 spin_lock_bh(&ar->data_lock);
3938
3939 switch (ar->scan.state) {
3940 case ATH10K_SCAN_IDLE:
3941 /* This can happen if the timeout worker kicked in and requested
3942 * an abort while the scan completion was being processed.
3943 */
3944 break;
3945 case ATH10K_SCAN_STARTING:
3946 case ATH10K_SCAN_ABORTING:
3947 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3948 ath10k_scan_state_str(ar->scan.state),
3949 ar->scan.state);
3950 break;
3951 case ATH10K_SCAN_RUNNING:
3952 ar->scan.state = ATH10K_SCAN_ABORTING;
3953 spin_unlock_bh(&ar->data_lock);
3954
3955 ret = ath10k_scan_stop(ar);
3956 if (ret)
3957 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
3958
3959 spin_lock_bh(&ar->data_lock);
3960 break;
3961 }
3962
3963 spin_unlock_bh(&ar->data_lock);
3964 }
3965
3966 void ath10k_scan_timeout_work(struct work_struct *work)
3967 {
3968 struct ath10k *ar = container_of(work, struct ath10k,
3969 scan.timeout.work);
3970
3971 mutex_lock(&ar->conf_mutex);
3972 ath10k_scan_abort(ar);
3973 mutex_unlock(&ar->conf_mutex);
3974 }
3975
3976 static int ath10k_start_scan(struct ath10k *ar,
3977 const struct wmi_start_scan_arg *arg)
3978 {
3979 int ret;
3980
3981 lockdep_assert_held(&ar->conf_mutex);
3982
3983 ret = ath10k_wmi_start_scan(ar, arg);
3984 if (ret)
3985 return ret;
3986
3987 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
3988 if (ret == 0) {
3989 ret = ath10k_scan_stop(ar);
3990 if (ret)
3991 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
3992
3993 return -ETIMEDOUT;
3994 }
3995
3996 /* If we failed to start the scan, return error code at
3997 * this point. This is probably due to some issue in the
3998 * firmware, but no need to wedge the driver due to that...
3999 */
4000 spin_lock_bh(&ar->data_lock);
4001 if (ar->scan.state == ATH10K_SCAN_IDLE) {
4002 spin_unlock_bh(&ar->data_lock);
4003 return -EINVAL;
4004 }
4005 spin_unlock_bh(&ar->data_lock);
4006
4007 return 0;
4008 }
4009
4010 /**********************/
4011 /* mac80211 callbacks */
4012 /**********************/
4013
4014 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4015 struct ieee80211_tx_control *control,
4016 struct sk_buff *skb)
4017 {
4018 struct ath10k *ar = hw->priv;
4019 struct ath10k_htt *htt = &ar->htt;
4020 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4021 struct ieee80211_vif *vif = info->control.vif;
4022 struct ieee80211_sta *sta = control->sta;
4023 struct ieee80211_txq *txq = NULL;
4024 struct ieee80211_hdr *hdr = (void *)skb->data;
4025 enum ath10k_hw_txrx_mode txmode;
4026 enum ath10k_mac_tx_path txpath;
4027 bool is_htt;
4028 bool is_mgmt;
4029 bool is_presp;
4030 int ret;
4031
4032 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4033
4034 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4035 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4036 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4037 txpath == ATH10K_MAC_TX_HTT_MGMT);
4038 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4039
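/* Frames going through HTT are accounted in the pending-tx counters
 * before being handed to the tx path; the counters are rolled back
 * below if the transmission attempt fails.
 */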
4040 if (is_htt) {
4041 spin_lock_bh(&ar->htt.tx_lock);
4042 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4043
4044 ret = ath10k_htt_tx_inc_pending(htt);
4045 if (ret) {
4046 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4047 ret);
4048 spin_unlock_bh(&ar->htt.tx_lock);
4049 ieee80211_free_txskb(ar->hw, skb);
4050 return;
4051 }
4052
4053 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4054 if (ret) {
4055 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4056 ret);
4057 ath10k_htt_tx_dec_pending(htt);
4058 spin_unlock_bh(&ar->htt.tx_lock);
4059 ieee80211_free_txskb(ar->hw, skb);
4060 return;
4061 }
4062 spin_unlock_bh(&ar->htt.tx_lock);
4063 }
4064
4065 ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4066 if (ret) {
4067 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4068 if (is_htt) {
4069 spin_lock_bh(&ar->htt.tx_lock);
4070 ath10k_htt_tx_dec_pending(htt);
4071 if (is_mgmt)
4072 ath10k_htt_tx_mgmt_dec_pending(htt);
4073 spin_unlock_bh(&ar->htt.tx_lock);
4074 }
4075 return;
4076 }
4077 }
4078
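/* mac80211 reports that this txq has frames pending: put it on the
 * driver's txq list (if it is not already there) and try to push
 * pending frames out right away.
 */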
4079 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4080 struct ieee80211_txq *txq)
4081 {
4082 struct ath10k *ar = hw->priv;
4083 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4084
4085 spin_lock_bh(&ar->txqs_lock);
4086 if (list_empty(&artxq->list))
4087 list_add_tail(&artxq->list, &ar->txqs);
4088 spin_unlock_bh(&ar->txqs_lock);
4089
4090 ath10k_mac_tx_push_pending(ar);
4091 ath10k_htt_tx_txq_update(hw, txq);
4092 }
4093
4094 /* Must not be called with conf_mutex held as workers can use that also. */
4095 void ath10k_drain_tx(struct ath10k *ar)
4096 {
4097 /* make sure rcu-protected mac80211 tx path itself is drained */
4098 synchronize_net();
4099
4100 ath10k_offchan_tx_purge(ar);
4101 ath10k_mgmt_over_wmi_tx_purge(ar);
4102
4103 cancel_work_sync(&ar->offchan_tx_work);
4104 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4105 }
4106
4107 void ath10k_halt(struct ath10k *ar)
4108 {
4109 struct ath10k_vif *arvif;
4110
4111 lockdep_assert_held(&ar->conf_mutex);
4112
4113 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4114 ar->filter_flags = 0;
4115 ar->monitor = false;
4116 ar->monitor_arvif = NULL;
4117
4118 if (ar->monitor_started)
4119 ath10k_monitor_stop(ar);
4120
4121 ar->monitor_started = false;
4122 ar->tx_paused = 0;
4123
4124 ath10k_scan_finish(ar);
4125 ath10k_peer_cleanup_all(ar);
4126 ath10k_core_stop(ar);
4127 ath10k_hif_power_down(ar);
4128
4129 spin_lock_bh(&ar->data_lock);
4130 list_for_each_entry(arvif, &ar->arvifs, list)
4131 ath10k_mac_vif_beacon_cleanup(arvif);
4132 spin_unlock_bh(&ar->data_lock);
4133 }
4134
4135 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4136 {
4137 struct ath10k *ar = hw->priv;
4138
4139 mutex_lock(&ar->conf_mutex);
4140
4141 *tx_ant = ar->cfg_tx_chainmask;
4142 *rx_ant = ar->cfg_rx_chainmask;
4143
4144 mutex_unlock(&ar->conf_mutex);
4145
4146 return 0;
4147 }
4148
4149 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4150 {
4151 /* It is not clear that allowing gaps in the chainmask
4152 * is helpful. It probably will not do what the user
4153 * is hoping for, so warn in that case.
4154 */
4155 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4156 return;
4157
4158 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4159 dbg, cm);
4160 }
4161
4162 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4163 {
4164 int nsts = ar->vht_cap_info;
4165
4166 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4167 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4168
4169 /* If the firmware does not report the number of space-time
4170 * streams it supports, assume up to 4 BF STS and return the
4171 * VHT CAP encoding of that value (nsts - 1).
4172 */
4173 if (nsts == 0)
4174 return 3;
4175
4176 return nsts;
4177 }
4178
4179 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4180 {
4181 int sound_dim = ar->vht_cap_info;
4182
4183 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4184 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4185
4186 /* If the sounding dimension is not advertised by the firmware,
4187 * let's use a default value of 1
4188 */
4189 if (sound_dim == 0)
4190 return 1;
4191
4192 return sound_dim;
4193 }
4194
4195 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4196 {
4197 struct ieee80211_sta_vht_cap vht_cap = {0};
4198 u16 mcs_map;
4199 u32 val;
4200 int i;
4201
4202 vht_cap.vht_supported = 1;
4203 vht_cap.cap = ar->vht_cap_info;
4204
4205 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4206 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4207 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4208 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4209 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4210
4211 vht_cap.cap |= val;
4212 }
4213
4214 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4215 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4216 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4217 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4218 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4219
4220 vht_cap.cap |= val;
4221 }
4222
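/* Build the VHT MCS map: two bits per spatial stream, advertising
 * MCS 0-9 for every chain enabled in the tx chainmask and marking the
 * remaining streams as not supported.
 */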
4223 mcs_map = 0;
4224 for (i = 0; i < 8; i++) {
4225 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4226 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4227 else
4228 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4229 }
4230
4231 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4232 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4233
4234 return vht_cap;
4235 }
4236
4237 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4238 {
4239 int i;
4240 struct ieee80211_sta_ht_cap ht_cap = {0};
4241
4242 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4243 return ht_cap;
4244
4245 ht_cap.ht_supported = 1;
4246 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4247 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4248 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4249 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4250 ht_cap.cap |=
4251 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4252
4253 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4254 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4255
4256 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4257 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4258
4259 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4260 u32 smps;
4261
4262 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4263 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4264
4265 ht_cap.cap |= smps;
4266 }
4267
4268 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
4269 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4270
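/* Translate the firmware's RX STBC stream count into the corresponding
 * bits of the HT capability field.
 */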
4271 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4272 u32 stbc;
4273
4274 stbc = ar->ht_cap_info;
4275 stbc &= WMI_HT_CAP_RX_STBC;
4276 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4277 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4278 stbc &= IEEE80211_HT_CAP_RX_STBC;
4279
4280 ht_cap.cap |= stbc;
4281 }
4282
4283 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4284 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4285
4286 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4287 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4288
4289 /* max AMSDU is implicitly taken from vht_cap_info */
4290 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4291 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4292
4293 for (i = 0; i < ar->num_rf_chains; i++) {
4294 if (ar->cfg_rx_chainmask & BIT(i))
4295 ht_cap.mcs.rx_mask[i] = 0xFF;
4296 }
4297
4298 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4299
4300 return ht_cap;
4301 }
4302
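/* Install the freshly computed HT/VHT capabilities into the bands the
 * device actually supports.
 */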
4303 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4304 {
4305 struct ieee80211_supported_band *band;
4306 struct ieee80211_sta_vht_cap vht_cap;
4307 struct ieee80211_sta_ht_cap ht_cap;
4308
4309 ht_cap = ath10k_get_ht_cap(ar);
4310 vht_cap = ath10k_create_vht_cap(ar);
4311
4312 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4313 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4314 band->ht_cap = ht_cap;
4315 }
4316 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4317 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4318 band->ht_cap = ht_cap;
4319 band->vht_cap = vht_cap;
4320 }
4321 }
4322
4323 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4324 {
4325 int ret;
4326
4327 lockdep_assert_held(&ar->conf_mutex);
4328
4329 ath10k_check_chain_mask(ar, tx_ant, "tx");
4330 ath10k_check_chain_mask(ar, rx_ant, "rx");
4331
4332 ar->cfg_tx_chainmask = tx_ant;
4333 ar->cfg_rx_chainmask = rx_ant;
4334
4335 if ((ar->state != ATH10K_STATE_ON) &&
4336 (ar->state != ATH10K_STATE_RESTARTED))
4337 return 0;
4338
4339 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4340 tx_ant);
4341 if (ret) {
4342 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4343 ret, tx_ant);
4344 return ret;
4345 }
4346
4347 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4348 rx_ant);
4349 if (ret) {
4350 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4351 ret, rx_ant);
4352 return ret;
4353 }
4354
4355 /* Reload HT/VHT capability */
4356 ath10k_mac_setup_ht_vht_cap(ar);
4357
4358 return 0;
4359 }
4360
4361 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4362 {
4363 struct ath10k *ar = hw->priv;
4364 int ret;
4365
4366 mutex_lock(&ar->conf_mutex);
4367 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4368 mutex_unlock(&ar->conf_mutex);
4369 return ret;
4370 }
4371
4372 static int ath10k_start(struct ieee80211_hw *hw)
4373 {
4374 struct ath10k *ar = hw->priv;
4375 u32 param;
4376 int ret = 0;
4377
4378 /*
4379 * This makes sense only when restarting hw. It is harmless to call
4380 * unconditionally. This is necessary to make sure no HTT/WMI tx
4381 * commands will be submitted while restarting.
4382 */
4383 ath10k_drain_tx(ar);
4384
4385 mutex_lock(&ar->conf_mutex);
4386
4387 switch (ar->state) {
4388 case ATH10K_STATE_OFF:
4389 ar->state = ATH10K_STATE_ON;
4390 break;
4391 case ATH10K_STATE_RESTARTING:
4392 ath10k_halt(ar);
4393 ar->state = ATH10K_STATE_RESTARTED;
4394 break;
4395 case ATH10K_STATE_ON:
4396 case ATH10K_STATE_RESTARTED:
4397 case ATH10K_STATE_WEDGED:
4398 WARN_ON(1);
4399 ret = -EINVAL;
4400 goto err;
4401 case ATH10K_STATE_UTF:
4402 ret = -EBUSY;
4403 goto err;
4404 }
4405
4406 ret = ath10k_hif_power_up(ar);
4407 if (ret) {
4408 ath10k_err(ar, "Could not init hif: %d\n", ret);
4409 goto err_off;
4410 }
4411
4412 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4413 &ar->normal_mode_fw);
4414 if (ret) {
4415 ath10k_err(ar, "Could not init core: %d\n", ret);
4416 goto err_power_down;
4417 }
4418
4419 param = ar->wmi.pdev_param->pmf_qos;
4420 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4421 if (ret) {
4422 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4423 goto err_core_stop;
4424 }
4425
4426 param = ar->wmi.pdev_param->dynamic_bw;
4427 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4428 if (ret) {
4429 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4430 goto err_core_stop;
4431 }
4432
4433 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4434 ret = ath10k_wmi_adaptive_qcs(ar, true);
4435 if (ret) {
4436 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4437 ret);
4438 goto err_core_stop;
4439 }
4440 }
4441
4442 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4443 param = ar->wmi.pdev_param->burst_enable;
4444 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4445 if (ret) {
4446 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4447 goto err_core_stop;
4448 }
4449 }
4450
4451 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4452
4453 /*
4454 * By default the FW sets the ARP frame AC to voice (6). In that case ARP
4455 * exchange does not work properly with a UAPSD-enabled AP. ARP requests
4456 * which arrive with access category 0 are processed by the network stack
4457 * and sent back with access category 0, but the FW changes the access
4458 * category to 6. Setting the ARP frame access category to best effort (0)
4459 * solves this problem.
4460 */
4461
4462 param = ar->wmi.pdev_param->arp_ac_override;
4463 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4464 if (ret) {
4465 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4466 ret);
4467 goto err_core_stop;
4468 }
4469
4470 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4471 ar->running_fw->fw_file.fw_features)) {
4472 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4473 WMI_CCA_DETECT_LEVEL_AUTO,
4474 WMI_CCA_DETECT_MARGIN_AUTO);
4475 if (ret) {
4476 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4477 ret);
4478 goto err_core_stop;
4479 }
4480 }
4481
4482 param = ar->wmi.pdev_param->ani_enable;
4483 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4484 if (ret) {
4485 ath10k_warn(ar, "failed to enable ani by default: %d\n",
4486 ret);
4487 goto err_core_stop;
4488 }
4489
4490 ar->ani_enabled = true;
4491
4492 if (ath10k_peer_stats_enabled(ar)) {
4493 param = ar->wmi.pdev_param->peer_stats_update_period;
4494 ret = ath10k_wmi_pdev_set_param(ar, param,
4495 PEER_DEFAULT_STATS_UPDATE_PERIOD);
4496 if (ret) {
4497 ath10k_warn(ar,
4498 "failed to set peer stats period : %d\n",
4499 ret);
4500 goto err_core_stop;
4501 }
4502 }
4503
4504 param = ar->wmi.pdev_param->enable_btcoex;
4505 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4506 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4507 ar->running_fw->fw_file.fw_features)) {
4508 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4509 if (ret) {
4510 ath10k_warn(ar,
4511 "failed to set btcoex param: %d\n", ret);
4512 goto err_core_stop;
4513 }
4514 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4515 }
4516
4517 ar->num_started_vdevs = 0;
4518 ath10k_regd_update(ar);
4519
4520 ath10k_spectral_start(ar);
4521 ath10k_thermal_set_throttling(ar);
4522
4523 mutex_unlock(&ar->conf_mutex);
4524 return 0;
4525
4526 err_core_stop:
4527 ath10k_core_stop(ar);
4528
4529 err_power_down:
4530 ath10k_hif_power_down(ar);
4531
4532 err_off:
4533 ar->state = ATH10K_STATE_OFF;
4534
4535 err:
4536 mutex_unlock(&ar->conf_mutex);
4537 return ret;
4538 }
4539
4540 static void ath10k_stop(struct ieee80211_hw *hw)
4541 {
4542 struct ath10k *ar = hw->priv;
4543
4544 ath10k_drain_tx(ar);
4545
4546 mutex_lock(&ar->conf_mutex);
4547 if (ar->state != ATH10K_STATE_OFF) {
4548 ath10k_halt(ar);
4549 ar->state = ATH10K_STATE_OFF;
4550 }
4551 mutex_unlock(&ar->conf_mutex);
4552
4553 cancel_delayed_work_sync(&ar->scan.timeout);
4554 cancel_work_sync(&ar->restart_work);
4555 }
4556
4557 static int ath10k_config_ps(struct ath10k *ar)
4558 {
4559 struct ath10k_vif *arvif;
4560 int ret = 0;
4561
4562 lockdep_assert_held(&ar->conf_mutex);
4563
4564 list_for_each_entry(arvif, &ar->arvifs, list) {
4565 ret = ath10k_mac_vif_setup_ps(arvif);
4566 if (ret) {
4567 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4568 break;
4569 }
4570 }
4571
4572 return ret;
4573 }
4574
4575 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4576 {
4577 int ret;
4578 u32 param;
4579
4580 lockdep_assert_held(&ar->conf_mutex);
4581
4582 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4583
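/* The WMI txpower limit appears to be expressed in 0.5 dBm steps,
 * hence the dBm value is doubled below.
 */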
4584 param = ar->wmi.pdev_param->txpower_limit2g;
4585 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4586 if (ret) {
4587 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4588 txpower, ret);
4589 return ret;
4590 }
4591
4592 param = ar->wmi.pdev_param->txpower_limit5g;
4593 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4594 if (ret) {
4595 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4596 txpower, ret);
4597 return ret;
4598 }
4599
4600 return 0;
4601 }
4602
4603 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4604 {
4605 struct ath10k_vif *arvif;
4606 int ret, txpower = -1;
4607
4608 lockdep_assert_held(&ar->conf_mutex);
4609
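/* The limit is applied per pdev, so use the lowest tx power configured
 * across all vifs.
 */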
4610 list_for_each_entry(arvif, &ar->arvifs, list) {
4611 WARN_ON(arvif->txpower < 0);
4612
4613 if (txpower == -1)
4614 txpower = arvif->txpower;
4615 else
4616 txpower = min(txpower, arvif->txpower);
4617 }
4618
4619 if (WARN_ON(txpower == -1))
4620 return -EINVAL;
4621
4622 ret = ath10k_mac_txpower_setup(ar, txpower);
4623 if (ret) {
4624 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4625 txpower, ret);
4626 return ret;
4627 }
4628
4629 return 0;
4630 }
4631
4632 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4633 {
4634 struct ath10k *ar = hw->priv;
4635 struct ieee80211_conf *conf = &hw->conf;
4636 int ret = 0;
4637
4638 mutex_lock(&ar->conf_mutex);
4639
4640 if (changed & IEEE80211_CONF_CHANGE_PS)
4641 ath10k_config_ps(ar);
4642
4643 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4644 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4645 ret = ath10k_monitor_recalc(ar);
4646 if (ret)
4647 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4648 }
4649
4650 mutex_unlock(&ar->conf_mutex);
4651 return ret;
4652 }
4653
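/* Map a contiguous chainmask to a number of spatial streams; masks with
 * gaps fall back to a single stream.
 */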
4654 static u32 get_nss_from_chainmask(u16 chain_mask)
4655 {
4656 if ((chain_mask & 0xf) == 0xf)
4657 return 4;
4658 else if ((chain_mask & 0x7) == 0x7)
4659 return 3;
4660 else if ((chain_mask & 0x3) == 0x3)
4661 return 2;
4662 return 1;
4663 }
4664
4665 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4666 {
4667 u32 value = 0;
4668 struct ath10k *ar = arvif->ar;
4669 int nsts;
4670 int sound_dim;
4671
4672 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4673 return 0;
4674
4675 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4676 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4677 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4678 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4679
4680 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4681 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4682 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4683 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4684
4685 if (!value)
4686 return 0;
4687
4688 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4689 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4690
4691 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4692 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4693 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4694
4695 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4696 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4697
4698 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4699 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4700 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4701
4702 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4703 ar->wmi.vdev_param->txbf, value);
4704 }
4705
4706 /*
4707 * TODO:
4708 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4709 * because we will send mgmt frames without CCK. This requirement
4710 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4711 * in the TX packet.
4712 */
4713 static int ath10k_add_interface(struct ieee80211_hw *hw,
4714 struct ieee80211_vif *vif)
4715 {
4716 struct ath10k *ar = hw->priv;
4717 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4718 struct ath10k_peer *peer;
4719 enum wmi_sta_powersave_param param;
4720 int ret = 0;
4721 u32 value;
4722 int bit;
4723 int i;
4724 u32 vdev_param;
4725
4726 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4727
4728 mutex_lock(&ar->conf_mutex);
4729
4730 memset(arvif, 0, sizeof(*arvif));
4731 ath10k_mac_txq_init(vif->txq);
4732
4733 arvif->ar = ar;
4734 arvif->vif = vif;
4735
4736 INIT_LIST_HEAD(&arvif->list);
4737 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4738 INIT_DELAYED_WORK(&arvif->connection_loss_work,
4739 ath10k_mac_vif_sta_connection_loss_work);
4740
4741 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4742 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4743 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4744 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4745 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4746 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4747 }
4748
4749 if (ar->num_peers >= ar->max_num_peers) {
4750 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4751 ret = -ENOBUFS;
4752 goto err;
4753 }
4754
4755 if (ar->free_vdev_map == 0) {
4756 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4757 ret = -EBUSY;
4758 goto err;
4759 }
4760 bit = __ffs64(ar->free_vdev_map);
4761
4762 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4763 bit, ar->free_vdev_map);
4764
4765 arvif->vdev_id = bit;
4766 arvif->vdev_subtype =
4767 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4768
4769 switch (vif->type) {
4770 case NL80211_IFTYPE_P2P_DEVICE:
4771 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4772 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4773 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4774 break;
4775 case NL80211_IFTYPE_UNSPECIFIED:
4776 case NL80211_IFTYPE_STATION:
4777 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4778 if (vif->p2p)
4779 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4780 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4781 break;
4782 case NL80211_IFTYPE_ADHOC:
4783 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4784 break;
4785 case NL80211_IFTYPE_MESH_POINT:
4786 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4787 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4788 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
4789 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4790 ret = -EINVAL;
4791 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4792 goto err;
4793 }
4794 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4795 break;
4796 case NL80211_IFTYPE_AP:
4797 arvif->vdev_type = WMI_VDEV_TYPE_AP;
4798
4799 if (vif->p2p)
4800 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4801 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
4802 break;
4803 case NL80211_IFTYPE_MONITOR:
4804 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4805 break;
4806 default:
4807 WARN_ON(1);
4808 break;
4809 }
4810
4811 /* Using vdev_id as queue number will make it very easy to do per-vif
4812 * tx queue locking. This shouldn't wrap due to interface combinations
4813 * but do a modulo for correctness sake and prevent using offchannel tx
4814 * queues for regular vif tx.
4815 */
4816 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4817 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4818 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4819
4820 /* Some firmware revisions don't wait for beacon tx completion before
4821 * sending another SWBA event. This could lead to hardware using old
4822 * (freed) beacon data in some cases, e.g. tx credit starvation
4823 * combined with missed TBTT. This is very very rare.
4824 *
4825 * On non-IOMMU-enabled hosts this could be a security issue
4826 * because the hw could beacon some random data on the air. On
4827 * IOMMU-enabled hosts DMAR faults would occur in most cases and the
4828 * target device would crash.
4829 *
4830 * Since no beacon tx completions (implicit or explicit) are
4831 * propagated to the host, the only workaround for this is to allocate a
4832 * DMA-coherent buffer for the lifetime of a vif and use it for all
4833 * beacon tx commands. Worst case for this approach is some beacons may
4834 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4835 */
4836 if (vif->type == NL80211_IFTYPE_ADHOC ||
4837 vif->type == NL80211_IFTYPE_MESH_POINT ||
4838 vif->type == NL80211_IFTYPE_AP) {
4839 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4840 IEEE80211_MAX_FRAME_LEN,
4841 &arvif->beacon_paddr,
4842 GFP_ATOMIC);
4843 if (!arvif->beacon_buf) {
4844 ret = -ENOMEM;
4845 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4846 ret);
4847 goto err;
4848 }
4849 }
4850 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4851 arvif->nohwcrypt = true;
4852
4853 if (arvif->nohwcrypt &&
4854 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4855 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
ret = -EINVAL;
4856 goto err;
4857 }
4858
4859 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4860 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4861 arvif->beacon_buf ? "single-buf" : "per-skb");
4862
4863 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4864 arvif->vdev_subtype, vif->addr);
4865 if (ret) {
4866 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4867 arvif->vdev_id, ret);
4868 goto err;
4869 }
4870
4871 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4872 list_add(&arvif->list, &ar->arvifs);
4873
4874 /* It makes no sense to have firmware do keepalives. mac80211 already
4875 * takes care of this with idle connection polling.
4876 */
4877 ret = ath10k_mac_vif_disable_keepalive(arvif);
4878 if (ret) {
4879 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4880 arvif->vdev_id, ret);
4881 goto err_vdev_delete;
4882 }
4883
4884 arvif->def_wep_key_idx = -1;
4885
4886 vdev_param = ar->wmi.vdev_param->tx_encap_type;
4887 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4888 ATH10K_HW_TXRX_NATIVE_WIFI);
4889 /* 10.X firmware does not support this VDEV parameter. Do not warn */
4890 if (ret && ret != -EOPNOTSUPP) {
4891 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4892 arvif->vdev_id, ret);
4893 goto err_vdev_delete;
4894 }
4895
4896 /* Configuring the number of spatial streams for a monitor interface
4897 * causes a target assert in qca9888 and qca6174.
4898 */
4899 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4900 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4901
4902 vdev_param = ar->wmi.vdev_param->nss;
4903 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4904 nss);
4905 if (ret) {
4906 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4907 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4908 ret);
4909 goto err_vdev_delete;
4910 }
4911 }
4912
4913 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4914 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4915 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4916 vif->addr, WMI_PEER_TYPE_DEFAULT);
4917 if (ret) {
4918 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4919 arvif->vdev_id, ret);
4920 goto err_vdev_delete;
4921 }
4922
4923 spin_lock_bh(&ar->data_lock);
4924
4925 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4926 if (!peer) {
4927 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4928 vif->addr, arvif->vdev_id);
4929 spin_unlock_bh(&ar->data_lock);
4930 ret = -ENOENT;
4931 goto err_peer_delete;
4932 }
4933
4934 arvif->peer_id = find_first_bit(peer->peer_ids,
4935 ATH10K_MAX_NUM_PEER_IDS);
4936
4937 spin_unlock_bh(&ar->data_lock);
4938 } else {
4939 arvif->peer_id = HTT_INVALID_PEERID;
4940 }
4941
4942 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
4943 ret = ath10k_mac_set_kickout(arvif);
4944 if (ret) {
4945 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
4946 arvif->vdev_id, ret);
4947 goto err_peer_delete;
4948 }
4949 }
4950
4951 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
4952 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
4953 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
4954 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4955 param, value);
4956 if (ret) {
4957 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
4958 arvif->vdev_id, ret);
4959 goto err_peer_delete;
4960 }
4961
4962 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
4963 if (ret) {
4964 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
4965 arvif->vdev_id, ret);
4966 goto err_peer_delete;
4967 }
4968
4969 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
4970 if (ret) {
4971 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
4972 arvif->vdev_id, ret);
4973 goto err_peer_delete;
4974 }
4975 }
4976
4977 ret = ath10k_mac_set_txbf_conf(arvif);
4978 if (ret) {
4979 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
4980 arvif->vdev_id, ret);
4981 goto err_peer_delete;
4982 }
4983
4984 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
4985 if (ret) {
4986 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
4987 arvif->vdev_id, ret);
4988 goto err_peer_delete;
4989 }
4990
4991 arvif->txpower = vif->bss_conf.txpower;
4992 ret = ath10k_mac_txpower_recalc(ar);
4993 if (ret) {
4994 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
4995 goto err_peer_delete;
4996 }
4997
4998 if (vif->type == NL80211_IFTYPE_MONITOR) {
4999 ar->monitor_arvif = arvif;
5000 ret = ath10k_monitor_recalc(ar);
5001 if (ret) {
5002 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5003 goto err_peer_delete;
5004 }
5005 }
5006
5007 spin_lock_bh(&ar->htt.tx_lock);
5008 if (!ar->tx_paused)
5009 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5010 spin_unlock_bh(&ar->htt.tx_lock);
5011
5012 mutex_unlock(&ar->conf_mutex);
5013 return 0;
5014
5015 err_peer_delete:
5016 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5017 arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5018 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5019
5020 err_vdev_delete:
5021 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5022 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5023 list_del(&arvif->list);
5024
5025 err:
5026 if (arvif->beacon_buf) {
5027 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5028 arvif->beacon_buf, arvif->beacon_paddr);
5029 arvif->beacon_buf = NULL;
5030 }
5031
5032 mutex_unlock(&ar->conf_mutex);
5033
5034 return ret;
5035 }
5036
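/* Clear every possible tx pause reason for this vif so its queues are
 * not left stopped once the interface is removed.
 */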
5037 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5038 {
5039 int i;
5040
5041 for (i = 0; i < BITS_PER_LONG; i++)
5042 ath10k_mac_vif_tx_unlock(arvif, i);
5043 }
5044
5045 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5046 struct ieee80211_vif *vif)
5047 {
5048 struct ath10k *ar = hw->priv;
5049 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5050 struct ath10k_peer *peer;
5051 int ret;
5052 int i;
5053
5054 cancel_work_sync(&arvif->ap_csa_work);
5055 cancel_delayed_work_sync(&arvif->connection_loss_work);
5056
5057 mutex_lock(&ar->conf_mutex);
5058
5059 spin_lock_bh(&ar->data_lock);
5060 ath10k_mac_vif_beacon_cleanup(arvif);
5061 spin_unlock_bh(&ar->data_lock);
5062
5063 ret = ath10k_spectral_vif_stop(arvif);
5064 if (ret)
5065 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5066 arvif->vdev_id, ret);
5067
5068 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5069 list_del(&arvif->list);
5070
5071 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5072 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5073 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5074 vif->addr);
5075 if (ret)
5076 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5077 arvif->vdev_id, ret);
5078
5079 kfree(arvif->u.ap.noa_data);
5080 }
5081
5082 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5083 arvif->vdev_id);
5084
5085 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5086 if (ret)
5087 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5088 arvif->vdev_id, ret);
5089
5090 /* Some firmware revisions don't notify the host about self-peer removal
5091 * until after the associated vdev is deleted.
5092 */
5093 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5094 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5095 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5096 vif->addr);
5097 if (ret)
5098 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5099 arvif->vdev_id, ret);
5100
5101 spin_lock_bh(&ar->data_lock);
5102 ar->num_peers--;
5103 spin_unlock_bh(&ar->data_lock);
5104 }
5105
5106 spin_lock_bh(&ar->data_lock);
5107 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5108 peer = ar->peer_map[i];
5109 if (!peer)
5110 continue;
5111
5112 if (peer->vif == vif) {
5113 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5114 vif->addr, arvif->vdev_id);
5115 peer->vif = NULL;
5116 }
5117 }
5118 spin_unlock_bh(&ar->data_lock);
5119
5120 ath10k_peer_cleanup(ar, arvif->vdev_id);
5121 ath10k_mac_txq_unref(ar, vif->txq);
5122
5123 if (vif->type == NL80211_IFTYPE_MONITOR) {
5124 ar->monitor_arvif = NULL;
5125 ret = ath10k_monitor_recalc(ar);
5126 if (ret)
5127 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5128 }
5129
5130 spin_lock_bh(&ar->htt.tx_lock);
5131 ath10k_mac_vif_tx_unlock_all(arvif);
5132 spin_unlock_bh(&ar->htt.tx_lock);
5133
5134 ath10k_mac_txq_unref(ar, vif->txq);
5135
5136 mutex_unlock(&ar->conf_mutex);
5137 }
5138
5139 /*
5140 * FIXME: Has to be verified.
5141 */
5142 #define SUPPORTED_FILTERS \
5143 (FIF_ALLMULTI | \
5144 FIF_CONTROL | \
5145 FIF_PSPOLL | \
5146 FIF_OTHER_BSS | \
5147 FIF_BCN_PRBRESP_PROMISC | \
5148 FIF_PROBE_REQ | \
5149 FIF_FCSFAIL)
5150
5151 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5152 unsigned int changed_flags,
5153 unsigned int *total_flags,
5154 u64 multicast)
5155 {
5156 struct ath10k *ar = hw->priv;
5157 int ret;
5158
5159 mutex_lock(&ar->conf_mutex);
5160
5161 changed_flags &= SUPPORTED_FILTERS;
5162 *total_flags &= SUPPORTED_FILTERS;
5163 ar->filter_flags = *total_flags;
5164
5165 ret = ath10k_monitor_recalc(ar);
5166 if (ret)
5167 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5168
5169 mutex_unlock(&ar->conf_mutex);
5170 }
5171
5172 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5173 struct ieee80211_vif *vif,
5174 struct ieee80211_bss_conf *info,
5175 u32 changed)
5176 {
5177 struct ath10k *ar = hw->priv;
5178 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5179 int ret = 0;
5180 u32 vdev_param, pdev_param, slottime, preamble;
5181
5182 mutex_lock(&ar->conf_mutex);
5183
5184 if (changed & BSS_CHANGED_IBSS)
5185 ath10k_control_ibss(arvif, info, vif->addr);
5186
5187 if (changed & BSS_CHANGED_BEACON_INT) {
5188 arvif->beacon_interval = info->beacon_int;
5189 vdev_param = ar->wmi.vdev_param->beacon_interval;
5190 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5191 arvif->beacon_interval);
5192 ath10k_dbg(ar, ATH10K_DBG_MAC,
5193 "mac vdev %d beacon_interval %d\n",
5194 arvif->vdev_id, arvif->beacon_interval);
5195
5196 if (ret)
5197 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5198 arvif->vdev_id, ret);
5199 }
5200
5201 if (changed & BSS_CHANGED_BEACON) {
5202 ath10k_dbg(ar, ATH10K_DBG_MAC,
5203 "vdev %d set beacon tx mode to staggered\n",
5204 arvif->vdev_id);
5205
5206 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5207 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5208 WMI_BEACON_STAGGERED_MODE);
5209 if (ret)
5210 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5211 arvif->vdev_id, ret);
5212
5213 ret = ath10k_mac_setup_bcn_tmpl(arvif);
5214 if (ret)
5215 ath10k_warn(ar, "failed to update beacon template: %d\n",
5216 ret);
5217
5218 if (ieee80211_vif_is_mesh(vif)) {
5219 /* mesh doesn't use SSID but firmware needs it */
5220 strncpy(arvif->u.ap.ssid, "mesh",
5221 sizeof(arvif->u.ap.ssid));
5222 arvif->u.ap.ssid_len = 4;
5223 }
5224 }
5225
5226 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5227 ret = ath10k_mac_setup_prb_tmpl(arvif);
5228 if (ret)
5229 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5230 arvif->vdev_id, ret);
5231 }
5232
5233 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5234 arvif->dtim_period = info->dtim_period;
5235
5236 ath10k_dbg(ar, ATH10K_DBG_MAC,
5237 "mac vdev %d dtim_period %d\n",
5238 arvif->vdev_id, arvif->dtim_period);
5239
5240 vdev_param = ar->wmi.vdev_param->dtim_period;
5241 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5242 arvif->dtim_period);
5243 if (ret)
5244 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5245 arvif->vdev_id, ret);
5246 }
5247
5248 if (changed & BSS_CHANGED_SSID &&
5249 vif->type == NL80211_IFTYPE_AP) {
5250 arvif->u.ap.ssid_len = info->ssid_len;
5251 if (info->ssid_len)
5252 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5253 arvif->u.ap.hidden_ssid = info->hidden_ssid;
5254 }
5255
5256 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5257 ether_addr_copy(arvif->bssid, info->bssid);
5258
5259 if (changed & BSS_CHANGED_BEACON_ENABLED)
5260 ath10k_control_beaconing(arvif, info);
5261
5262 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5263 arvif->use_cts_prot = info->use_cts_prot;
5264 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5265 arvif->vdev_id, info->use_cts_prot);
5266
5267 ret = ath10k_recalc_rtscts_prot(arvif);
5268 if (ret)
5269 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5270 arvif->vdev_id, ret);
5271
5272 vdev_param = ar->wmi.vdev_param->protection_mode;
5273 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5274 info->use_cts_prot ? 1 : 0);
5275 if (ret)
5276 ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5277 info->use_cts_prot, arvif->vdev_id, ret);
5278 }
5279
5280 if (changed & BSS_CHANGED_ERP_SLOT) {
5281 if (info->use_short_slot)
5282 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5283
5284 else
5285 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5286
5287 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5288 arvif->vdev_id, slottime);
5289
5290 vdev_param = ar->wmi.vdev_param->slot_time;
5291 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5292 slottime);
5293 if (ret)
5294 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5295 arvif->vdev_id, ret);
5296 }
5297
5298 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5299 if (info->use_short_preamble)
5300 preamble = WMI_VDEV_PREAMBLE_SHORT;
5301 else
5302 preamble = WMI_VDEV_PREAMBLE_LONG;
5303
5304 ath10k_dbg(ar, ATH10K_DBG_MAC,
5305 "mac vdev %d preamble %dn",
5306 arvif->vdev_id, preamble);
5307
5308 vdev_param = ar->wmi.vdev_param->preamble;
5309 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5310 preamble);
5311 if (ret)
5312 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5313 arvif->vdev_id, ret);
5314 }
5315
5316 if (changed & BSS_CHANGED_ASSOC) {
5317 if (info->assoc) {
5318 /* Workaround: Make sure monitor vdev is not running
5319 * when associating to prevent some firmware revisions
5320 * (e.g. 10.1 and 10.2) from crashing.
5321 */
5322 if (ar->monitor_started)
5323 ath10k_monitor_stop(ar);
5324 ath10k_bss_assoc(hw, vif, info);
5325 ath10k_monitor_recalc(ar);
5326 } else {
5327 ath10k_bss_disassoc(hw, vif);
5328 }
5329 }
5330
5331 if (changed & BSS_CHANGED_TXPOWER) {
5332 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5333 arvif->vdev_id, info->txpower);
5334
5335 arvif->txpower = info->txpower;
5336 ret = ath10k_mac_txpower_recalc(ar);
5337 if (ret)
5338 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5339 }
5340
5341 if (changed & BSS_CHANGED_PS) {
5342 arvif->ps = vif->bss_conf.ps;
5343
5344 ret = ath10k_config_ps(ar);
5345 if (ret)
5346 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5347 arvif->vdev_id, ret);
5348 }
5349
5350 mutex_unlock(&ar->conf_mutex);
5351 }
5352
5353 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5354 struct ieee80211_vif *vif,
5355 struct ieee80211_scan_request *hw_req)
5356 {
5357 struct ath10k *ar = hw->priv;
5358 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5359 struct cfg80211_scan_request *req = &hw_req->req;
5360 struct wmi_start_scan_arg arg;
5361 int ret = 0;
5362 int i;
5363
5364 mutex_lock(&ar->conf_mutex);
5365
5366 spin_lock_bh(&ar->data_lock);
5367 switch (ar->scan.state) {
5368 case ATH10K_SCAN_IDLE:
5369 reinit_completion(&ar->scan.started);
5370 reinit_completion(&ar->scan.completed);
5371 ar->scan.state = ATH10K_SCAN_STARTING;
5372 ar->scan.is_roc = false;
5373 ar->scan.vdev_id = arvif->vdev_id;
5374 ret = 0;
5375 break;
5376 case ATH10K_SCAN_STARTING:
5377 case ATH10K_SCAN_RUNNING:
5378 case ATH10K_SCAN_ABORTING:
5379 ret = -EBUSY;
5380 break;
5381 }
5382 spin_unlock_bh(&ar->data_lock);
5383
5384 if (ret)
5385 goto exit;
5386
5387 memset(&arg, 0, sizeof(arg));
5388 ath10k_wmi_start_scan_init(ar, &arg);
5389 arg.vdev_id = arvif->vdev_id;
5390 arg.scan_id = ATH10K_SCAN_ID;
5391
5392 if (req->ie_len) {
5393 arg.ie_len = req->ie_len;
5394 memcpy(arg.ie, req->ie, arg.ie_len);
5395 }
5396
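/* Without any SSIDs to probe for, fall back to a passive scan. */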
5397 if (req->n_ssids) {
5398 arg.n_ssids = req->n_ssids;
5399 for (i = 0; i < arg.n_ssids; i++) {
5400 arg.ssids[i].len = req->ssids[i].ssid_len;
5401 arg.ssids[i].ssid = req->ssids[i].ssid;
5402 }
5403 } else {
5404 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5405 }
5406
5407 if (req->n_channels) {
5408 arg.n_channels = req->n_channels;
5409 for (i = 0; i < arg.n_channels; i++)
5410 arg.channels[i] = req->channels[i]->center_freq;
5411 }
5412
5413 ret = ath10k_start_scan(ar, &arg);
5414 if (ret) {
5415 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5416 spin_lock_bh(&ar->data_lock);
5417 ar->scan.state = ATH10K_SCAN_IDLE;
5418 spin_unlock_bh(&ar->data_lock);
5419 }
5420
5421 /* Add a 200ms margin to account for event/command processing */
5422 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5423 msecs_to_jiffies(arg.max_scan_time +
5424 200));
5425
5426 exit:
5427 mutex_unlock(&ar->conf_mutex);
5428 return ret;
5429 }
5430
5431 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5432 struct ieee80211_vif *vif)
5433 {
5434 struct ath10k *ar = hw->priv;
5435
5436 mutex_lock(&ar->conf_mutex);
5437 ath10k_scan_abort(ar);
5438 mutex_unlock(&ar->conf_mutex);
5439
5440 cancel_delayed_work_sync(&ar->scan.timeout);
5441 }
5442
5443 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5444 struct ath10k_vif *arvif,
5445 enum set_key_cmd cmd,
5446 struct ieee80211_key_conf *key)
5447 {
5448 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5449 int ret;
5450
5451 /* The 10.1 firmware branch requires the default key index to be set to the
5452 * group key index after installing it. Otherwise the FW/HW transmits
5453 * corrupted frames with multi-vif APs. This is not required for the main
5454 * firmware branch (e.g. 636).
5455 *
5456 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5457 *
5458 * FIXME: It remains unknown if this is required for multi-vif STA
5459 * interfaces on 10.1.
5460 */
5461
5462 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5463 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5464 return;
5465
5466 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5467 return;
5468
5469 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5470 return;
5471
5472 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5473 return;
5474
5475 if (cmd != SET_KEY)
5476 return;
5477
5478 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5479 key->keyidx);
5480 if (ret)
5481 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5482 arvif->vdev_id, ret);
5483 }
5484
5485 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5486 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5487 struct ieee80211_key_conf *key)
5488 {
5489 struct ath10k *ar = hw->priv;
5490 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5491 struct ath10k_peer *peer;
5492 const u8 *peer_addr;
5493 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5494 key->cipher == WLAN_CIPHER_SUITE_WEP104;
5495 int ret = 0;
5496 int ret2;
5497 u32 flags = 0;
5498 u32 flags2;
5499
5500 /* this one needs to be done in software */
5501 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5502 return 1;
5503
5504 if (arvif->nohwcrypt)
5505 return 1;
5506
5507 if (key->keyidx > WMI_MAX_KEY_INDEX)
5508 return -ENOSPC;
5509
5510 mutex_lock(&ar->conf_mutex);
5511
5512 if (sta)
5513 peer_addr = sta->addr;
5514 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5515 peer_addr = vif->bss_conf.bssid;
5516 else
5517 peer_addr = vif->addr;
5518
5519 key->hw_key_idx = key->keyidx;
5520
5521 if (is_wep) {
5522 if (cmd == SET_KEY)
5523 arvif->wep_keys[key->keyidx] = key;
5524 else
5525 arvif->wep_keys[key->keyidx] = NULL;
5526 }
5527
5528 /* The peer should not disappear midway (unless the FW goes awry) since
5529 * we already hold conf_mutex. We just make sure it's there now. */
5530 spin_lock_bh(&ar->data_lock);
5531 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5532 spin_unlock_bh(&ar->data_lock);
5533
5534 if (!peer) {
5535 if (cmd == SET_KEY) {
5536 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5537 peer_addr);
5538 ret = -EOPNOTSUPP;
5539 goto exit;
5540 } else {
5541 /* if the peer doesn't exist there is no key to disable
5542 * anymore */
5543 goto exit;
5544 }
5545 }
5546
5547 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5548 flags |= WMI_KEY_PAIRWISE;
5549 else
5550 flags |= WMI_KEY_GROUP;
5551
5552 if (is_wep) {
5553 if (cmd == DISABLE_KEY)
5554 ath10k_clear_vdev_key(arvif, key);
5555
5556 /* When WEP keys are uploaded it's possible that there are
5557 * stations associated already (e.g. when merging) without any
5558 * keys. Static WEP needs an explicit per-peer key upload.
5559 */
5560 if (vif->type == NL80211_IFTYPE_ADHOC &&
5561 cmd == SET_KEY)
5562 ath10k_mac_vif_update_wep_key(arvif, key);
5563
5564 /* 802.1x never sets the def_wep_key_idx so each set_key()
5565 * call changes default tx key.
5566 *
5567 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5568 * after first set_key().
5569 */
5570 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5571 flags |= WMI_KEY_TX_USAGE;
5572 }
5573
5574 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5575 if (ret) {
5576 WARN_ON(ret > 0);
5577 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5578 arvif->vdev_id, peer_addr, ret);
5579 goto exit;
5580 }
5581
5582 /* mac80211 sets static WEP keys as groupwise while firmware requires
5583 * them to be installed twice as both pairwise and groupwise.
5584 */
5585 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5586 flags2 = flags;
5587 flags2 &= ~WMI_KEY_GROUP;
5588 flags2 |= WMI_KEY_PAIRWISE;
5589
5590 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5591 if (ret) {
5592 WARN_ON(ret > 0);
5593 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5594 arvif->vdev_id, peer_addr, ret);
5595 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5596 peer_addr, flags);
5597 if (ret2) {
5598 WARN_ON(ret2 > 0);
5599 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5600 arvif->vdev_id, peer_addr, ret2);
5601 }
5602 goto exit;
5603 }
5604 }
5605
5606 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5607
5608 spin_lock_bh(&ar->data_lock);
5609 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5610 if (peer && cmd == SET_KEY)
5611 peer->keys[key->keyidx] = key;
5612 else if (peer && cmd == DISABLE_KEY)
5613 peer->keys[key->keyidx] = NULL;
5614 else if (peer == NULL)
5615 /* impossible unless FW goes crazy */
5616 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5617 spin_unlock_bh(&ar->data_lock);
5618
5619 exit:
5620 mutex_unlock(&ar->conf_mutex);
5621 return ret;
5622 }
5623
5624 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5625 struct ieee80211_vif *vif,
5626 int keyidx)
5627 {
5628 struct ath10k *ar = hw->priv;
5629 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5630 int ret;
5631
5632 mutex_lock(&arvif->ar->conf_mutex);
5633
5634 if (arvif->ar->state != ATH10K_STATE_ON)
5635 goto unlock;
5636
5637 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5638 arvif->vdev_id, keyidx);
5639
5640 ret = ath10k_wmi_vdev_set_param(arvif->ar,
5641 arvif->vdev_id,
5642 arvif->ar->wmi.vdev_param->def_keyid,
5643 keyidx);
5644
5645 if (ret) {
5646 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5647 arvif->vdev_id,
5648 ret);
5649 goto unlock;
5650 }
5651
5652 arvif->def_wep_key_idx = keyidx;
5653
5654 unlock:
5655 mutex_unlock(&arvif->ar->conf_mutex);
5656 }
5657
5658 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5659 {
5660 struct ath10k *ar;
5661 struct ath10k_vif *arvif;
5662 struct ath10k_sta *arsta;
5663 struct ieee80211_sta *sta;
5664 struct cfg80211_chan_def def;
5665 enum nl80211_band band;
5666 const u8 *ht_mcs_mask;
5667 const u16 *vht_mcs_mask;
5668 u32 changed, bw, nss, smps;
5669 int err;
5670
5671 arsta = container_of(wk, struct ath10k_sta, update_wk);
5672 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5673 arvif = arsta->arvif;
5674 ar = arvif->ar;
5675
5676 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5677 return;
5678
5679 band = def.chan->band;
5680 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5681 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5682
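/* Snapshot the pending changes under data_lock; they are applied below
 * with conf_mutex held and data_lock released.
 */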
5683 spin_lock_bh(&ar->data_lock);
5684
5685 changed = arsta->changed;
5686 arsta->changed = 0;
5687
5688 bw = arsta->bw;
5689 nss = arsta->nss;
5690 smps = arsta->smps;
5691
5692 spin_unlock_bh(&ar->data_lock);
5693
5694 mutex_lock(&ar->conf_mutex);
5695
5696 nss = max_t(u32, 1, nss);
5697 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5698 ath10k_mac_max_vht_nss(vht_mcs_mask)));
5699
5700 if (changed & IEEE80211_RC_BW_CHANGED) {
5701 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5702 sta->addr, bw);
5703
5704 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5705 WMI_PEER_CHAN_WIDTH, bw);
5706 if (err)
5707 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5708 sta->addr, bw, err);
5709 }
5710
5711 if (changed & IEEE80211_RC_NSS_CHANGED) {
5712 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5713 sta->addr, nss);
5714
5715 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5716 WMI_PEER_NSS, nss);
5717 if (err)
5718 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5719 sta->addr, nss, err);
5720 }
5721
5722 if (changed & IEEE80211_RC_SMPS_CHANGED) {
5723 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5724 sta->addr, smps);
5725
5726 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5727 WMI_PEER_SMPS_STATE, smps);
5728 if (err)
5729 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5730 sta->addr, smps, err);
5731 }
5732
5733 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5734 changed & IEEE80211_RC_NSS_CHANGED) {
5735 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5736 sta->addr);
5737
5738 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5739 if (err)
5740 ath10k_warn(ar, "failed to reassociate station: %pM\n",
5741 sta->addr);
5742 }
5743
5744 mutex_unlock(&ar->conf_mutex);
5745 }
5746
5747 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5748 struct ieee80211_sta *sta)
5749 {
5750 struct ath10k *ar = arvif->ar;
5751
5752 lockdep_assert_held(&ar->conf_mutex);
5753
5754 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5755 return 0;
5756
5757 if (ar->num_stations >= ar->max_num_stations)
5758 return -ENOBUFS;
5759
5760 ar->num_stations++;
5761
5762 return 0;
5763 }
5764
5765 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5766 struct ieee80211_sta *sta)
5767 {
5768 struct ath10k *ar = arvif->ar;
5769
5770 lockdep_assert_held(&ar->conf_mutex);
5771
5772 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5773 return;
5774
5775 ar->num_stations--;
5776 }
5777
5778 struct ath10k_mac_tdls_iter_data {
5779 u32 num_tdls_stations;
5780 struct ieee80211_vif *curr_vif;
5781 };
5782
5783 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5784 struct ieee80211_sta *sta)
5785 {
5786 struct ath10k_mac_tdls_iter_data *iter_data = data;
5787 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5788 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5789
5790 if (sta->tdls && sta_vif == iter_data->curr_vif)
5791 iter_data->num_tdls_stations++;
5792 }
5793
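/* Count the TDLS stations currently associated on the given vif. */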
5794 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5795 struct ieee80211_vif *vif)
5796 {
5797 struct ath10k_mac_tdls_iter_data data = {};
5798
5799 data.curr_vif = vif;
5800
5801 ieee80211_iterate_stations_atomic(hw,
5802 ath10k_mac_tdls_vif_stations_count_iter,
5803 &data);
5804 return data.num_tdls_stations;
5805 }
5806
5807 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5808 struct ieee80211_vif *vif)
5809 {
5810 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5811 int *num_tdls_vifs = data;
5812
5813 if (vif->type != NL80211_IFTYPE_STATION)
5814 return;
5815
5816 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5817 (*num_tdls_vifs)++;
5818 }
5819
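/* Count the station vifs that currently have at least one TDLS peer. */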
5820 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5821 {
5822 int num_tdls_vifs = 0;
5823
5824 ieee80211_iterate_active_interfaces_atomic(hw,
5825 IEEE80211_IFACE_ITER_NORMAL,
5826 ath10k_mac_tdls_vifs_count_iter,
5827 &num_tdls_vifs);
5828 return num_tdls_vifs;
5829 }
5830
5831 static int ath10k_sta_state(struct ieee80211_hw *hw,
5832 struct ieee80211_vif *vif,
5833 struct ieee80211_sta *sta,
5834 enum ieee80211_sta_state old_state,
5835 enum ieee80211_sta_state new_state)
5836 {
5837 struct ath10k *ar = hw->priv;
5838 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5839 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5840 struct ath10k_peer *peer;
5841 int ret = 0;
5842 int i;
5843
5844 if (old_state == IEEE80211_STA_NOTEXIST &&
5845 new_state == IEEE80211_STA_NONE) {
5846 memset(arsta, 0, sizeof(*arsta));
5847 arsta->arvif = arvif;
5848 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5849
5850 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5851 ath10k_mac_txq_init(sta->txq[i]);
5852 }
5853
5854 /* cancel must be done outside the mutex to avoid deadlock */
5855 if ((old_state == IEEE80211_STA_NONE &&
5856 new_state == IEEE80211_STA_NOTEXIST))
5857 cancel_work_sync(&arsta->update_wk);
5858
5859 mutex_lock(&ar->conf_mutex);
5860
5861 if (old_state == IEEE80211_STA_NOTEXIST &&
5862 new_state == IEEE80211_STA_NONE) {
5863 /*
5864 * New station addition.
5865 */
5866 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5867 u32 num_tdls_stations;
5868 u32 num_tdls_vifs;
5869
5870 ath10k_dbg(ar, ATH10K_DBG_MAC,
5871 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5872 arvif->vdev_id, sta->addr,
5873 ar->num_stations + 1, ar->max_num_stations,
5874 ar->num_peers + 1, ar->max_num_peers);
5875
5876 ret = ath10k_mac_inc_num_stations(arvif, sta);
5877 if (ret) {
5878 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5879 ar->max_num_stations);
5880 goto exit;
5881 }
5882
5883 if (sta->tdls)
5884 peer_type = WMI_PEER_TYPE_TDLS;
5885
5886 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5887 sta->addr, peer_type);
5888 if (ret) {
5889 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5890 sta->addr, arvif->vdev_id, ret);
5891 ath10k_mac_dec_num_stations(arvif, sta);
5892 goto exit;
5893 }
5894
5895 spin_lock_bh(&ar->data_lock);
5896
5897 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5898 if (!peer) {
5899 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5900 vif->addr, arvif->vdev_id);
5901 spin_unlock_bh(&ar->data_lock);
5902 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5903 ath10k_mac_dec_num_stations(arvif, sta);
5904 ret = -ENOENT;
5905 goto exit;
5906 }
5907
5908 arsta->peer_id = find_first_bit(peer->peer_ids,
5909 ATH10K_MAX_NUM_PEER_IDS);
5910
5911 spin_unlock_bh(&ar->data_lock);
5912
5913 if (!sta->tdls)
5914 goto exit;
5915
5916 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5917 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5918
5919 if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
5920 num_tdls_stations == 0) {
5921 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
5922 arvif->vdev_id, ar->max_num_tdls_vdevs);
5923 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5924 ath10k_mac_dec_num_stations(arvif, sta);
5925 ret = -ENOBUFS;
5926 goto exit;
5927 }
5928
5929 if (num_tdls_stations == 0) {
5930 /* This is the first tdls peer in current vif */
5931 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
5932
5933 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5934 state);
5935 if (ret) {
5936 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
5937 arvif->vdev_id, ret);
5938 ath10k_peer_delete(ar, arvif->vdev_id,
5939 sta->addr);
5940 ath10k_mac_dec_num_stations(arvif, sta);
5941 goto exit;
5942 }
5943 }
5944
5945 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
5946 WMI_TDLS_PEER_STATE_PEERING);
5947 if (ret) {
5948 ath10k_warn(ar,
5949 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
5950 sta->addr, arvif->vdev_id, ret);
5951 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5952 ath10k_mac_dec_num_stations(arvif, sta);
5953
5954 if (num_tdls_stations != 0)
5955 goto exit;
5956 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
5957 WMI_TDLS_DISABLE);
5958 }
5959 } else if ((old_state == IEEE80211_STA_NONE &&
5960 new_state == IEEE80211_STA_NOTEXIST)) {
5961 /*
5962 * Existing station deletion.
5963 */
5964 ath10k_dbg(ar, ATH10K_DBG_MAC,
5965 "mac vdev %d peer delete %pM (sta gone)\n",
5966 arvif->vdev_id, sta->addr);
5967
5968 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5969 if (ret)
5970 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
5971 sta->addr, arvif->vdev_id, ret);
5972
5973 ath10k_mac_dec_num_stations(arvif, sta);
5974
5975 spin_lock_bh(&ar->data_lock);
5976 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5977 peer = ar->peer_map[i];
5978 if (!peer)
5979 continue;
5980
5981 if (peer->sta == sta) {
5982 ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
5983 sta->addr, arvif->vdev_id);
5984 peer->sta = NULL;
5985 }
5986 }
5987 spin_unlock_bh(&ar->data_lock);
5988
5989 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5990 ath10k_mac_txq_unref(ar, sta->txq[i]);
5991
5992 if (!sta->tdls)
5993 goto exit;
5994
5995 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
5996 goto exit;
5997
5998 /* This was the last tdls peer in current vif */
5999 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6000 WMI_TDLS_DISABLE);
6001 if (ret) {
6002 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6003 arvif->vdev_id, ret);
6004 }
6005 } else if (old_state == IEEE80211_STA_AUTH &&
6006 new_state == IEEE80211_STA_ASSOC &&
6007 (vif->type == NL80211_IFTYPE_AP ||
6008 vif->type == NL80211_IFTYPE_MESH_POINT ||
6009 vif->type == NL80211_IFTYPE_ADHOC)) {
6010 /*
6011 * New association.
6012 */
6013 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6014 sta->addr);
6015
6016 ret = ath10k_station_assoc(ar, vif, sta, false);
6017 if (ret)
6018 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6019 sta->addr, arvif->vdev_id, ret);
6020 } else if (old_state == IEEE80211_STA_ASSOC &&
6021 new_state == IEEE80211_STA_AUTHORIZED &&
6022 sta->tdls) {
6023 /*
6024 * Tdls station authorized.
6025 */
6026 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6027 sta->addr);
6028
6029 ret = ath10k_station_assoc(ar, vif, sta, false);
6030 if (ret) {
6031 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6032 sta->addr, arvif->vdev_id, ret);
6033 goto exit;
6034 }
6035
6036 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6037 WMI_TDLS_PEER_STATE_CONNECTED);
6038 if (ret)
6039 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6040 sta->addr, arvif->vdev_id, ret);
6041 } else if (old_state == IEEE80211_STA_ASSOC &&
6042 new_state == IEEE80211_STA_AUTH &&
6043 (vif->type == NL80211_IFTYPE_AP ||
6044 vif->type == NL80211_IFTYPE_MESH_POINT ||
6045 vif->type == NL80211_IFTYPE_ADHOC)) {
6046 /*
6047 * Disassociation.
6048 */
6049 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6050 sta->addr);
6051
6052 ret = ath10k_station_disassoc(ar, vif, sta);
6053 if (ret)
6054 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6055 sta->addr, arvif->vdev_id, ret);
6056 }
6057 exit:
6058 mutex_unlock(&ar->conf_mutex);
6059 return ret;
6060 }
6061
6062 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6063 u16 ac, bool enable)
6064 {
6065 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6066 struct wmi_sta_uapsd_auto_trig_arg arg = {};
6067 u32 prio = 0, acc = 0;
6068 u32 value = 0;
6069 int ret = 0;
6070
6071 lockdep_assert_held(&ar->conf_mutex);
6072
6073 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6074 return 0;
6075
6076 switch (ac) {
6077 case IEEE80211_AC_VO:
6078 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6079 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6080 prio = 7;
6081 acc = 3;
6082 break;
6083 case IEEE80211_AC_VI:
6084 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6085 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6086 prio = 5;
6087 acc = 2;
6088 break;
6089 case IEEE80211_AC_BE:
6090 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6091 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6092 prio = 2;
6093 acc = 1;
6094 break;
6095 case IEEE80211_AC_BK:
6096 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6097 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6098 prio = 0;
6099 acc = 0;
6100 break;
6101 }
6102
6103 if (enable)
6104 arvif->u.sta.uapsd |= value;
6105 else
6106 arvif->u.sta.uapsd &= ~value;
6107
6108 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6109 WMI_STA_PS_PARAM_UAPSD,
6110 arvif->u.sta.uapsd);
6111 if (ret) {
6112 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6113 goto exit;
6114 }
6115
6116 if (arvif->u.sta.uapsd)
6117 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6118 else
6119 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6120
6121 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6122 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6123 value);
6124 if (ret)
6125 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6126
6127 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6128 if (ret) {
6129 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6130 arvif->vdev_id, ret);
6131 return ret;
6132 }
6133
6134 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6135 if (ret) {
6136 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6137 arvif->vdev_id, ret);
6138 return ret;
6139 }
6140
6141 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6142 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6143 /* Only userspace can make an educated decision on when to send a
6144 * trigger frame. The following effectively disables UAPSD
6145 * autotrigger in firmware (which is enabled by default
6146 * provided the autotrigger service is available).
6147 */
6148
6149 arg.wmm_ac = acc;
6150 arg.user_priority = prio;
6151 arg.service_interval = 0;
6152 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6153 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6154
6155 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6156 arvif->bssid, &arg, 1);
6157 if (ret) {
6158 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6159 ret);
6160 return ret;
6161 }
6162 }
6163
6164 exit:
6165 return ret;
6166 }
6167
6168 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6169 struct ieee80211_vif *vif, u16 ac,
6170 const struct ieee80211_tx_queue_params *params)
6171 {
6172 struct ath10k *ar = hw->priv;
6173 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6174 struct wmi_wmm_params_arg *p = NULL;
6175 int ret;
6176
6177 mutex_lock(&ar->conf_mutex);
6178
6179 switch (ac) {
6180 case IEEE80211_AC_VO:
6181 p = &arvif->wmm_params.ac_vo;
6182 break;
6183 case IEEE80211_AC_VI:
6184 p = &arvif->wmm_params.ac_vi;
6185 break;
6186 case IEEE80211_AC_BE:
6187 p = &arvif->wmm_params.ac_be;
6188 break;
6189 case IEEE80211_AC_BK:
6190 p = &arvif->wmm_params.ac_bk;
6191 break;
6192 }
6193
6194 if (WARN_ON(!p)) {
6195 ret = -EINVAL;
6196 goto exit;
6197 }
6198
6199 p->cwmin = params->cw_min;
6200 p->cwmax = params->cw_max;
6201 p->aifs = params->aifs;
6202
6203 /*
6204 * The channel time duration programmed in the HW is in absolute
6205 * microseconds, while mac80211 gives the txop in units of
6206 * 32 microseconds.
6207 */
6208 p->txop = params->txop * 32;
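/* Illustrative example (added for clarity, not from the original source):
 * with a txop of 94 (the common AC_VI default), the value programmed into
 * the hardware would be 94 * 32 = 3008 microseconds (3.008 ms).
 */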
6209
6210 if (ar->wmi.ops->gen_vdev_wmm_conf) {
6211 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6212 &arvif->wmm_params);
6213 if (ret) {
6214 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6215 arvif->vdev_id, ret);
6216 goto exit;
6217 }
6218 } else {
6219 /* This won't work well with multi-interface cases but it's
6220 * better than nothing.
6221 */
6222 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6223 if (ret) {
6224 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6225 goto exit;
6226 }
6227 }
6228
6229 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6230 if (ret)
6231 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6232
6233 exit:
6234 mutex_unlock(&ar->conf_mutex);
6235 return ret;
6236 }
6237
6238 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6239
6240 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6241 struct ieee80211_vif *vif,
6242 struct ieee80211_channel *chan,
6243 int duration,
6244 enum ieee80211_roc_type type)
6245 {
6246 struct ath10k *ar = hw->priv;
6247 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6248 struct wmi_start_scan_arg arg;
6249 int ret = 0;
6250 u32 scan_time_msec;
6251
6252 mutex_lock(&ar->conf_mutex);
6253
6254 spin_lock_bh(&ar->data_lock);
6255 switch (ar->scan.state) {
6256 case ATH10K_SCAN_IDLE:
6257 reinit_completion(&ar->scan.started);
6258 reinit_completion(&ar->scan.completed);
6259 reinit_completion(&ar->scan.on_channel);
6260 ar->scan.state = ATH10K_SCAN_STARTING;
6261 ar->scan.is_roc = true;
6262 ar->scan.vdev_id = arvif->vdev_id;
6263 ar->scan.roc_freq = chan->center_freq;
6264 ar->scan.roc_notify = true;
6265 ret = 0;
6266 break;
6267 case ATH10K_SCAN_STARTING:
6268 case ATH10K_SCAN_RUNNING:
6269 case ATH10K_SCAN_ABORTING:
6270 ret = -EBUSY;
6271 break;
6272 }
6273 spin_unlock_bh(&ar->data_lock);
6274
6275 if (ret)
6276 goto exit;
6277
6278 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6279
6280 memset(&arg, 0, sizeof(arg));
6281 ath10k_wmi_start_scan_init(ar, &arg);
6282 arg.vdev_id = arvif->vdev_id;
6283 arg.scan_id = ATH10K_SCAN_ID;
6284 arg.n_channels = 1;
6285 arg.channels[0] = chan->center_freq;
6286 arg.dwell_time_active = scan_time_msec;
6287 arg.dwell_time_passive = scan_time_msec;
6288 arg.max_scan_time = scan_time_msec;
6289 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6290 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6291 arg.burst_duration_ms = duration;
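/* Note (added for clarity): the remain-on-channel request is implemented as
 * a single-channel passive "scan" with probe requests filtered out. Dwell
 * and max scan times are set to twice the maximum ROC duration, presumably
 * so the firmware does not leave the channel early; the requested duration
 * itself is enforced by the scan timeout work queued further below.
 */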
6292
6293 ret = ath10k_start_scan(ar, &arg);
6294 if (ret) {
6295 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6296 spin_lock_bh(&ar->data_lock);
6297 ar->scan.state = ATH10K_SCAN_IDLE;
6298 spin_unlock_bh(&ar->data_lock);
6299 goto exit;
6300 }
6301
6302 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6303 if (ret == 0) {
6304 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6305
6306 ret = ath10k_scan_stop(ar);
6307 if (ret)
6308 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6309
6310 ret = -ETIMEDOUT;
6311 goto exit;
6312 }
6313
6314 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6315 msecs_to_jiffies(duration));
6316
6317 ret = 0;
6318 exit:
6319 mutex_unlock(&ar->conf_mutex);
6320 return ret;
6321 }
6322
6323 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6324 {
6325 struct ath10k *ar = hw->priv;
6326
6327 mutex_lock(&ar->conf_mutex);
6328
6329 spin_lock_bh(&ar->data_lock);
6330 ar->scan.roc_notify = false;
6331 spin_unlock_bh(&ar->data_lock);
6332
6333 ath10k_scan_abort(ar);
6334
6335 mutex_unlock(&ar->conf_mutex);
6336
6337 cancel_delayed_work_sync(&ar->scan.timeout);
6338
6339 return 0;
6340 }
6341
6342 /*
6343 * Both the RTS and fragmentation thresholds are interface-specific
6344 * in ath10k, but device-specific in mac80211.
6345 */
6346
6347 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6348 {
6349 struct ath10k *ar = hw->priv;
6350 struct ath10k_vif *arvif;
6351 int ret = 0;
6352
6353 mutex_lock(&ar->conf_mutex);
6354 list_for_each_entry(arvif, &ar->arvifs, list) {
6355 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6356 arvif->vdev_id, value);
6357
6358 ret = ath10k_mac_set_rts(arvif, value);
6359 if (ret) {
6360 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6361 arvif->vdev_id, ret);
6362 break;
6363 }
6364 }
6365 mutex_unlock(&ar->conf_mutex);
6366
6367 return ret;
6368 }
6369
6370 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6371 {
6372 /* Even though there's a WMI enum for fragmentation threshold, no known
6373 * firmware actually implements it. Moreover, it is not possible to delegate
6374 * frame fragmentation to mac80211 because firmware clears the "more
6375 * fragments" bit in frame control making it impossible for remote
6376 * devices to reassemble frames.
6377 *
6378 * Hence implement a dummy callback just to say fragmentation isn't
6379 * supported. This effectively prevents mac80211 from doing frame
6380 * fragmentation in software.
6381 */
6382 return -EOPNOTSUPP;
6383 }
6384
6385 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6386 u32 queues, bool drop)
6387 {
6388 struct ath10k *ar = hw->priv;
6389 bool skip;
6390 long time_left;
6391
6392 /* mac80211 doesn't care whether we really transmit queued frames or not;
6393 * we'll collect those frames either way if we stop/delete vdevs */
6394 if (drop)
6395 return;
6396
6397 mutex_lock(&ar->conf_mutex);
6398
6399 if (ar->state == ATH10K_STATE_WEDGED)
6400 goto skip;
6401
6402 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6403 bool empty;
6404
6405 spin_lock_bh(&ar->htt.tx_lock);
6406 empty = (ar->htt.num_pending_tx == 0);
6407 spin_unlock_bh(&ar->htt.tx_lock);
6408
6409 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6410 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6411 &ar->dev_flags);
6412
6413 (empty || skip);
6414 }), ATH10K_FLUSH_TIMEOUT_HZ);
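/* Note (added for clarity): the statement expression above evaluates to
 * (empty || skip), i.e. the wait completes once all pending HTT tx frames
 * have drained or the device is wedged/crash-flushing, bounded by
 * ATH10K_FLUSH_TIMEOUT_HZ.
 */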
6415
6416 if (time_left == 0 || skip)
6417 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6418 skip, ar->state, time_left);
6419
6420 skip:
6421 mutex_unlock(&ar->conf_mutex);
6422 }
6423
6424 /* TODO: Implement this function properly
6425 * For now it is needed to reply to Probe Requests in IBSS mode.
6426 * Probably we need this information from the FW.
6427 */
6428 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6429 {
6430 return 1;
6431 }
6432
6433 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6434 enum ieee80211_reconfig_type reconfig_type)
6435 {
6436 struct ath10k *ar = hw->priv;
6437
6438 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6439 return;
6440
6441 mutex_lock(&ar->conf_mutex);
6442
6443 /* If the device failed to restart it will be in a different state, e.g.
6444 * ATH10K_STATE_WEDGED */
6445 if (ar->state == ATH10K_STATE_RESTARTED) {
6446 ath10k_info(ar, "device successfully recovered\n");
6447 ar->state = ATH10K_STATE_ON;
6448 ieee80211_wake_queues(ar->hw);
6449 }
6450
6451 mutex_unlock(&ar->conf_mutex);
6452 }
6453
6454 static void
6455 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6456 struct ieee80211_channel *channel)
6457 {
6458 int ret;
6459 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6460
6461 lockdep_assert_held(&ar->conf_mutex);
6462
6463 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6464 (ar->rx_channel != channel))
6465 return;
6466
6467 if (ar->scan.state != ATH10K_SCAN_IDLE) {
6468 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6469 return;
6470 }
6471
6472 reinit_completion(&ar->bss_survey_done);
6473
6474 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6475 if (ret) {
6476 ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6477 return;
6478 }
6479
6480 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6481 if (!ret) {
6482 ath10k_warn(ar, "bss channel survey timed out\n");
6483 return;
6484 }
6485 }
6486
6487 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6488 struct survey_info *survey)
6489 {
6490 struct ath10k *ar = hw->priv;
6491 struct ieee80211_supported_band *sband;
6492 struct survey_info *ar_survey = &ar->survey[idx];
6493 int ret = 0;
6494
6495 mutex_lock(&ar->conf_mutex);
6496
6497 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6498 if (sband && idx >= sband->n_channels) {
6499 idx -= sband->n_channels;
6500 sband = NULL;
6501 }
6502
6503 if (!sband)
6504 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6505
6506 if (!sband || idx >= sband->n_channels) {
6507 ret = -ENOENT;
6508 goto exit;
6509 }
6510
6511 ath10k_mac_update_bss_chan_survey(ar, survey->channel);
6512
6513 spin_lock_bh(&ar->data_lock);
6514 memcpy(survey, ar_survey, sizeof(*survey));
6515 spin_unlock_bh(&ar->data_lock);
6516
6517 survey->channel = &sband->channels[idx];
6518
6519 if (ar->rx_channel == survey->channel)
6520 survey->filled |= SURVEY_INFO_IN_USE;
6521
6522 exit:
6523 mutex_unlock(&ar->conf_mutex);
6524 return ret;
6525 }
6526
6527 static bool
6528 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6529 enum nl80211_band band,
6530 const struct cfg80211_bitrate_mask *mask)
6531 {
6532 int num_rates = 0;
6533 int i;
6534
6535 num_rates += hweight32(mask->control[band].legacy);
6536
6537 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6538 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6539
6540 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6541 num_rates += hweight16(mask->control[band].vht_mcs[i]);
6542
6543 return num_rates == 1;
6544 }
6545
6546 static bool
6547 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6548 enum nl80211_band band,
6549 const struct cfg80211_bitrate_mask *mask,
6550 int *nss)
6551 {
6552 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6553 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6554 u8 ht_nss_mask = 0;
6555 u8 vht_nss_mask = 0;
6556 int i;
6557
6558 if (mask->control[band].legacy)
6559 return false;
6560
6561 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6562 if (mask->control[band].ht_mcs[i] == 0)
6563 continue;
6564 else if (mask->control[band].ht_mcs[i] ==
6565 sband->ht_cap.mcs.rx_mask[i])
6566 ht_nss_mask |= BIT(i);
6567 else
6568 return false;
6569 }
6570
6571 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6572 if (mask->control[band].vht_mcs[i] == 0)
6573 continue;
6574 else if (mask->control[band].vht_mcs[i] ==
6575 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6576 vht_nss_mask |= BIT(i);
6577 else
6578 return false;
6579 }
6580
6581 if (ht_nss_mask != vht_nss_mask)
6582 return false;
6583
6584 if (ht_nss_mask == 0)
6585 return false;
6586
6587 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6588 return false;
6589
6590 *nss = fls(ht_nss_mask);
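/* Worked example (added for clarity): ht_nss_mask == 0x3 (streams 1-2)
 * passes the check above since BIT(fls(0x3)) - 1 == 0x3, giving *nss == 2;
 * a sparse mask such as 0x5 is rejected because BIT(fls(0x5)) - 1 == 0x7 != 0x5.
 */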
6591
6592 return true;
6593 }
6594
6595 static int
6596 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6597 enum nl80211_band band,
6598 const struct cfg80211_bitrate_mask *mask,
6599 u8 *rate, u8 *nss)
6600 {
6601 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6602 int rate_idx;
6603 int i;
6604 u16 bitrate;
6605 u8 preamble;
6606 u8 hw_rate;
6607
6608 if (hweight32(mask->control[band].legacy) == 1) {
6609 rate_idx = ffs(mask->control[band].legacy) - 1;
6610
6611 hw_rate = sband->bitrates[rate_idx].hw_value;
6612 bitrate = sband->bitrates[rate_idx].bitrate;
6613
6614 if (ath10k_mac_bitrate_is_cck(bitrate))
6615 preamble = WMI_RATE_PREAMBLE_CCK;
6616 else
6617 preamble = WMI_RATE_PREAMBLE_OFDM;
6618
6619 *nss = 1;
6620 *rate = preamble << 6 |
6621 (*nss - 1) << 4 |
6622 hw_rate << 0;
6623
6624 return 0;
6625 }
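/* Note (added for clarity): the fixed-rate code built above and in the loops
 * below packs the preamble into bits 7:6, (nss - 1) into bits 5:4 and the
 * rate/MCS index into bits 3:0. For example, assuming WMI_RATE_PREAMBLE_HT
 * is 2 as defined in wmi.h, HT MCS 7 on a single stream encodes as 0x87.
 */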
6626
6627 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6628 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6629 *nss = i + 1;
6630 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6631 (*nss - 1) << 4 |
6632 (ffs(mask->control[band].ht_mcs[i]) - 1);
6633
6634 return 0;
6635 }
6636 }
6637
6638 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6639 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6640 *nss = i + 1;
6641 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6642 (*nss - 1) << 4 |
6643 (ffs(mask->control[band].vht_mcs[i]) - 1);
6644
6645 return 0;
6646 }
6647 }
6648
6649 return -EINVAL;
6650 }
6651
6652 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6653 u8 rate, u8 nss, u8 sgi, u8 ldpc)
6654 {
6655 struct ath10k *ar = arvif->ar;
6656 u32 vdev_param;
6657 int ret;
6658
6659 lockdep_assert_held(&ar->conf_mutex);
6660
6661 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6662 arvif->vdev_id, rate, nss, sgi);
6663
6664 vdev_param = ar->wmi.vdev_param->fixed_rate;
6665 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6666 if (ret) {
6667 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6668 rate, ret);
6669 return ret;
6670 }
6671
6672 vdev_param = ar->wmi.vdev_param->nss;
6673 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6674 if (ret) {
6675 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6676 return ret;
6677 }
6678
6679 vdev_param = ar->wmi.vdev_param->sgi;
6680 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6681 if (ret) {
6682 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6683 return ret;
6684 }
6685
6686 vdev_param = ar->wmi.vdev_param->ldpc;
6687 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6688 if (ret) {
6689 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6690 return ret;
6691 }
6692
6693 return 0;
6694 }
6695
6696 static bool
6697 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6698 enum nl80211_band band,
6699 const struct cfg80211_bitrate_mask *mask)
6700 {
6701 int i;
6702 u16 vht_mcs;
6703
6704 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6705 * to express all VHT MCS rate masks. Effectively only the following
6706 * ranges can be used: none, 0-7, 0-8 and 0-9.
6707 */
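/* Added for clarity: in bitmask form the accepted ranges are 0x0000 (none),
 * 0x00ff (MCS 0-7), 0x01ff (MCS 0-8) and 0x03ff (MCS 0-9), which is exactly
 * what the BIT(n) - 1 cases below match.
 */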
6708 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6709 vht_mcs = mask->control[band].vht_mcs[i];
6710
6711 switch (vht_mcs) {
6712 case 0:
6713 case BIT(8) - 1:
6714 case BIT(9) - 1:
6715 case BIT(10) - 1:
6716 break;
6717 default:
6718 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6719 return false;
6720 }
6721 }
6722
6723 return true;
6724 }
6725
6726 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6727 struct ieee80211_sta *sta)
6728 {
6729 struct ath10k_vif *arvif = data;
6730 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6731 struct ath10k *ar = arvif->ar;
6732
6733 if (arsta->arvif != arvif)
6734 return;
6735
6736 spin_lock_bh(&ar->data_lock);
6737 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6738 spin_unlock_bh(&ar->data_lock);
6739
6740 ieee80211_queue_work(ar->hw, &arsta->update_wk);
6741 }
6742
6743 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6744 struct ieee80211_vif *vif,
6745 const struct cfg80211_bitrate_mask *mask)
6746 {
6747 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6748 struct cfg80211_chan_def def;
6749 struct ath10k *ar = arvif->ar;
6750 enum nl80211_band band;
6751 const u8 *ht_mcs_mask;
6752 const u16 *vht_mcs_mask;
6753 u8 rate;
6754 u8 nss;
6755 u8 sgi;
6756 u8 ldpc;
6757 int single_nss;
6758 int ret;
6759
6760 if (ath10k_mac_vif_chan(vif, &def))
6761 return -EPERM;
6762
6763 band = def.chan->band;
6764 ht_mcs_mask = mask->control[band].ht_mcs;
6765 vht_mcs_mask = mask->control[band].vht_mcs;
6766 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6767
6768 sgi = mask->control[band].gi;
6769 if (sgi == NL80211_TXRATE_FORCE_LGI)
6770 return -EINVAL;
6771
6772 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6773 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6774 &rate, &nss);
6775 if (ret) {
6776 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6777 arvif->vdev_id, ret);
6778 return ret;
6779 }
6780 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6781 &single_nss)) {
6782 rate = WMI_FIXED_RATE_NONE;
6783 nss = single_nss;
6784 } else {
6785 rate = WMI_FIXED_RATE_NONE;
6786 nss = min(ar->num_rf_chains,
6787 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6788 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6789
6790 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6791 return -EINVAL;
6792
6793 mutex_lock(&ar->conf_mutex);
6794
6795 arvif->bitrate_mask = *mask;
6796 ieee80211_iterate_stations_atomic(ar->hw,
6797 ath10k_mac_set_bitrate_mask_iter,
6798 arvif);
6799
6800 mutex_unlock(&ar->conf_mutex);
6801 }
6802
6803 mutex_lock(&ar->conf_mutex);
6804
6805 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6806 if (ret) {
6807 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6808 arvif->vdev_id, ret);
6809 goto exit;
6810 }
6811
6812 exit:
6813 mutex_unlock(&ar->conf_mutex);
6814
6815 return ret;
6816 }
6817
6818 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6819 struct ieee80211_vif *vif,
6820 struct ieee80211_sta *sta,
6821 u32 changed)
6822 {
6823 struct ath10k *ar = hw->priv;
6824 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6825 u32 bw, smps;
6826
6827 spin_lock_bh(&ar->data_lock);
6828
6829 ath10k_dbg(ar, ATH10K_DBG_MAC,
6830 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6831 sta->addr, changed, sta->bandwidth, sta->rx_nss,
6832 sta->smps_mode);
6833
6834 if (changed & IEEE80211_RC_BW_CHANGED) {
6835 bw = WMI_PEER_CHWIDTH_20MHZ;
6836
6837 switch (sta->bandwidth) {
6838 case IEEE80211_STA_RX_BW_20:
6839 bw = WMI_PEER_CHWIDTH_20MHZ;
6840 break;
6841 case IEEE80211_STA_RX_BW_40:
6842 bw = WMI_PEER_CHWIDTH_40MHZ;
6843 break;
6844 case IEEE80211_STA_RX_BW_80:
6845 bw = WMI_PEER_CHWIDTH_80MHZ;
6846 break;
6847 case IEEE80211_STA_RX_BW_160:
6848 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6849 sta->bandwidth, sta->addr);
6850 bw = WMI_PEER_CHWIDTH_20MHZ;
6851 break;
6852 }
6853
6854 arsta->bw = bw;
6855 }
6856
6857 if (changed & IEEE80211_RC_NSS_CHANGED)
6858 arsta->nss = sta->rx_nss;
6859
6860 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6861 smps = WMI_PEER_SMPS_PS_NONE;
6862
6863 switch (sta->smps_mode) {
6864 case IEEE80211_SMPS_AUTOMATIC:
6865 case IEEE80211_SMPS_OFF:
6866 smps = WMI_PEER_SMPS_PS_NONE;
6867 break;
6868 case IEEE80211_SMPS_STATIC:
6869 smps = WMI_PEER_SMPS_STATIC;
6870 break;
6871 case IEEE80211_SMPS_DYNAMIC:
6872 smps = WMI_PEER_SMPS_DYNAMIC;
6873 break;
6874 case IEEE80211_SMPS_NUM_MODES:
6875 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6876 sta->smps_mode, sta->addr);
6877 smps = WMI_PEER_SMPS_PS_NONE;
6878 break;
6879 }
6880
6881 arsta->smps = smps;
6882 }
6883
6884 arsta->changed |= changed;
6885
6886 spin_unlock_bh(&ar->data_lock);
6887
6888 ieee80211_queue_work(hw, &arsta->update_wk);
6889 }
6890
6891 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6892 {
6893 /*
6894 * FIXME: Return 0 for the time being. Need to figure out whether the FW
6895 * has an API to fetch the 64-bit local TSF.
6896 */
6897
6898 return 0;
6899 }
6900
6901 static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6902 u64 tsf)
6903 {
6904 struct ath10k *ar = hw->priv;
6905 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6906 u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
6907 int ret;
6908
6909 /* Workaround:
6910 *
6911 * The given tsf argument is the entire TSF value, but firmware accepts
6912 * only a TSF offset relative to the current TSF.
6913 *
6914 * The get_tsf function is used to obtain the offset; however, since
6915 * ath10k_get_tsf is not implemented properly, it always returns 0.
6916 * Luckily all callers of set_tsf, as of now, also rely on the get_tsf
6917 * function to build the entire tsf value (get_tsf() + tsf_delta), so the
6918 * final tsf offset passed to firmware will be arithmetically correct.
6919 */
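/* Worked example (added for clarity): if mac80211 computes the new TSF as
 * get_tsf() + delta and get_tsf() returns 0, then tsf_offset below ends up
 * being exactly delta, which is the offset the firmware expects.
 */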
6920 tsf_offset = tsf - ath10k_get_tsf(hw, vif);
6921 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6922 vdev_param, tsf_offset);
6923 if (ret && ret != -EOPNOTSUPP)
6924 ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
6925 }
6926
6927 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
6928 struct ieee80211_vif *vif,
6929 struct ieee80211_ampdu_params *params)
6930 {
6931 struct ath10k *ar = hw->priv;
6932 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6933 struct ieee80211_sta *sta = params->sta;
6934 enum ieee80211_ampdu_mlme_action action = params->action;
6935 u16 tid = params->tid;
6936
6937 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
6938 arvif->vdev_id, sta->addr, tid, action);
6939
6940 switch (action) {
6941 case IEEE80211_AMPDU_RX_START:
6942 case IEEE80211_AMPDU_RX_STOP:
6943 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
6944 * creation/removal. Do we need to verify this?
6945 */
6946 return 0;
6947 case IEEE80211_AMPDU_TX_START:
6948 case IEEE80211_AMPDU_TX_STOP_CONT:
6949 case IEEE80211_AMPDU_TX_STOP_FLUSH:
6950 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
6951 case IEEE80211_AMPDU_TX_OPERATIONAL:
6952 /* Firmware offloads Tx aggregation entirely so deny mac80211
6953 * Tx aggregation requests.
6954 */
6955 return -EOPNOTSUPP;
6956 }
6957
6958 return -EINVAL;
6959 }
6960
6961 static void
6962 ath10k_mac_update_rx_channel(struct ath10k *ar,
6963 struct ieee80211_chanctx_conf *ctx,
6964 struct ieee80211_vif_chanctx_switch *vifs,
6965 int n_vifs)
6966 {
6967 struct cfg80211_chan_def *def = NULL;
6968
6969 /* Both locks are required because ar->rx_channel is modified. This
6970 * allows readers to hold either lock.
6971 */
6972 lockdep_assert_held(&ar->conf_mutex);
6973 lockdep_assert_held(&ar->data_lock);
6974
6975 WARN_ON(ctx && vifs);
6976 WARN_ON(vifs && n_vifs != 1);
6977
6978 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
6979 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
6980 * ppdu on Rx may reduce performance on low-end systems. It should be
6981 * possible to make tables/hashmaps to speed the lookup up (be wary of
6982 * cpu data cache lines though regarding sizes) but to keep the initial
6983 * implementation simple and less intrusive, fall back to the slow lookup
6984 * only for multi-channel cases. Single-channel cases will continue to
6985 * use the old channel derivation and thus performance should not be
6986 * affected much.
6987 */
6988 rcu_read_lock();
6989 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
6990 ieee80211_iter_chan_contexts_atomic(ar->hw,
6991 ath10k_mac_get_any_chandef_iter,
6992 &def);
6993
6994 if (vifs)
6995 def = &vifs[0].new_ctx->def;
6996
6997 ar->rx_channel = def->chan;
6998 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
6999 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7000 /* During a driver restart due to a firmware assert, mac80211
7001 * already has a valid channel context for the given radio, so channel
7002 * context iteration returns num_chanctx > 0. Fix up rx_channel
7003 * when a restart is in progress.
7004 */
7005 ar->rx_channel = ctx->def.chan;
7006 } else {
7007 ar->rx_channel = NULL;
7008 }
7009 rcu_read_unlock();
7010 }
7011
7012 static void
7013 ath10k_mac_update_vif_chan(struct ath10k *ar,
7014 struct ieee80211_vif_chanctx_switch *vifs,
7015 int n_vifs)
7016 {
7017 struct ath10k_vif *arvif;
7018 int ret;
7019 int i;
7020
7021 lockdep_assert_held(&ar->conf_mutex);
7022
7023 /* First stop monitor interface. Some FW versions crash if there's a
7024 * lone monitor interface.
7025 */
7026 if (ar->monitor_started)
7027 ath10k_monitor_stop(ar);
7028
7029 for (i = 0; i < n_vifs; i++) {
7030 arvif = ath10k_vif_to_arvif(vifs[i].vif);
7031
7032 ath10k_dbg(ar, ATH10K_DBG_MAC,
7033 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7034 arvif->vdev_id,
7035 vifs[i].old_ctx->def.chan->center_freq,
7036 vifs[i].new_ctx->def.chan->center_freq,
7037 vifs[i].old_ctx->def.width,
7038 vifs[i].new_ctx->def.width);
7039
7040 if (WARN_ON(!arvif->is_started))
7041 continue;
7042
7043 if (WARN_ON(!arvif->is_up))
7044 continue;
7045
7046 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7047 if (ret) {
7048 ath10k_warn(ar, "failed to down vdev %d: %d\n",
7049 arvif->vdev_id, ret);
7050 continue;
7051 }
7052 }
7053
7054 /* All relevant vdevs are downed and associated channel resources
7055 * should be available for the channel switch now.
7056 */
7057
7058 spin_lock_bh(&ar->data_lock);
7059 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7060 spin_unlock_bh(&ar->data_lock);
7061
7062 for (i = 0; i < n_vifs; i++) {
7063 arvif = ath10k_vif_to_arvif(vifs[i].vif);
7064
7065 if (WARN_ON(!arvif->is_started))
7066 continue;
7067
7068 if (WARN_ON(!arvif->is_up))
7069 continue;
7070
7071 ret = ath10k_mac_setup_bcn_tmpl(arvif);
7072 if (ret)
7073 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7074 ret);
7075
7076 ret = ath10k_mac_setup_prb_tmpl(arvif);
7077 if (ret)
7078 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7079 ret);
7080
7081 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7082 if (ret) {
7083 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7084 arvif->vdev_id, ret);
7085 continue;
7086 }
7087
7088 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7089 arvif->bssid);
7090 if (ret) {
7091 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7092 arvif->vdev_id, ret);
7093 continue;
7094 }
7095 }
7096
7097 ath10k_monitor_recalc(ar);
7098 }
7099
7100 static int
7101 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7102 struct ieee80211_chanctx_conf *ctx)
7103 {
7104 struct ath10k *ar = hw->priv;
7105
7106 ath10k_dbg(ar, ATH10K_DBG_MAC,
7107 "mac chanctx add freq %hu width %d ptr %p\n",
7108 ctx->def.chan->center_freq, ctx->def.width, ctx);
7109
7110 mutex_lock(&ar->conf_mutex);
7111
7112 spin_lock_bh(&ar->data_lock);
7113 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7114 spin_unlock_bh(&ar->data_lock);
7115
7116 ath10k_recalc_radar_detection(ar);
7117 ath10k_monitor_recalc(ar);
7118
7119 mutex_unlock(&ar->conf_mutex);
7120
7121 return 0;
7122 }
7123
7124 static void
7125 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7126 struct ieee80211_chanctx_conf *ctx)
7127 {
7128 struct ath10k *ar = hw->priv;
7129
7130 ath10k_dbg(ar, ATH10K_DBG_MAC,
7131 "mac chanctx remove freq %hu width %d ptr %p\n",
7132 ctx->def.chan->center_freq, ctx->def.width, ctx);
7133
7134 mutex_lock(&ar->conf_mutex);
7135
7136 spin_lock_bh(&ar->data_lock);
7137 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7138 spin_unlock_bh(&ar->data_lock);
7139
7140 ath10k_recalc_radar_detection(ar);
7141 ath10k_monitor_recalc(ar);
7142
7143 mutex_unlock(&ar->conf_mutex);
7144 }
7145
7146 struct ath10k_mac_change_chanctx_arg {
7147 struct ieee80211_chanctx_conf *ctx;
7148 struct ieee80211_vif_chanctx_switch *vifs;
7149 int n_vifs;
7150 int next_vif;
7151 };
7152
7153 static void
7154 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7155 struct ieee80211_vif *vif)
7156 {
7157 struct ath10k_mac_change_chanctx_arg *arg = data;
7158
7159 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7160 return;
7161
7162 arg->n_vifs++;
7163 }
7164
7165 static void
7166 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7167 struct ieee80211_vif *vif)
7168 {
7169 struct ath10k_mac_change_chanctx_arg *arg = data;
7170 struct ieee80211_chanctx_conf *ctx;
7171
7172 ctx = rcu_access_pointer(vif->chanctx_conf);
7173 if (ctx != arg->ctx)
7174 return;
7175
7176 if (WARN_ON(arg->next_vif == arg->n_vifs))
7177 return;
7178
7179 arg->vifs[arg->next_vif].vif = vif;
7180 arg->vifs[arg->next_vif].old_ctx = ctx;
7181 arg->vifs[arg->next_vif].new_ctx = ctx;
7182 arg->next_vif++;
7183 }
7184
7185 static void
7186 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7187 struct ieee80211_chanctx_conf *ctx,
7188 u32 changed)
7189 {
7190 struct ath10k *ar = hw->priv;
7191 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7192
7193 mutex_lock(&ar->conf_mutex);
7194
7195 ath10k_dbg(ar, ATH10K_DBG_MAC,
7196 "mac chanctx change freq %hu width %d ptr %p changed %x\n",
7197 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7198
7199 /* This shouldn't really happen because channel switching should use
7200 * switch_vif_chanctx().
7201 */
7202 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7203 goto unlock;
7204
7205 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7206 ieee80211_iterate_active_interfaces_atomic(
7207 hw,
7208 IEEE80211_IFACE_ITER_NORMAL,
7209 ath10k_mac_change_chanctx_cnt_iter,
7210 &arg);
7211 if (arg.n_vifs == 0)
7212 goto radar;
7213
7214 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7215 GFP_KERNEL);
7216 if (!arg.vifs)
7217 goto radar;
7218
7219 ieee80211_iterate_active_interfaces_atomic(
7220 hw,
7221 IEEE80211_IFACE_ITER_NORMAL,
7222 ath10k_mac_change_chanctx_fill_iter,
7223 &arg);
7224 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7225 kfree(arg.vifs);
7226 }
7227
7228 radar:
7229 ath10k_recalc_radar_detection(ar);
7230
7231 /* FIXME: How to configure Rx chains properly? */
7232
7233 /* No other actions are actually necessary. Firmware maintains channel
7234 * definitions per vdev internally and there's no host-side channel
7235 * context abstraction to configure, e.g. channel width.
7236 */
7237
7238 unlock:
7239 mutex_unlock(&ar->conf_mutex);
7240 }
7241
7242 static int
7243 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7244 struct ieee80211_vif *vif,
7245 struct ieee80211_chanctx_conf *ctx)
7246 {
7247 struct ath10k *ar = hw->priv;
7248 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7249 int ret;
7250
7251 mutex_lock(&ar->conf_mutex);
7252
7253 ath10k_dbg(ar, ATH10K_DBG_MAC,
7254 "mac chanctx assign ptr %p vdev_id %i\n",
7255 ctx, arvif->vdev_id);
7256
7257 if (WARN_ON(arvif->is_started)) {
7258 mutex_unlock(&ar->conf_mutex);
7259 return -EBUSY;
7260 }
7261
7262 ret = ath10k_vdev_start(arvif, &ctx->def);
7263 if (ret) {
7264 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7265 arvif->vdev_id, vif->addr,
7266 ctx->def.chan->center_freq, ret);
7267 goto err;
7268 }
7269
7270 arvif->is_started = true;
7271
7272 ret = ath10k_mac_vif_setup_ps(arvif);
7273 if (ret) {
7274 ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7275 arvif->vdev_id, ret);
7276 goto err_stop;
7277 }
7278
7279 if (vif->type == NL80211_IFTYPE_MONITOR) {
7280 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7281 if (ret) {
7282 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7283 arvif->vdev_id, ret);
7284 goto err_stop;
7285 }
7286
7287 arvif->is_up = true;
7288 }
7289
7290 mutex_unlock(&ar->conf_mutex);
7291 return 0;
7292
7293 err_stop:
7294 ath10k_vdev_stop(arvif);
7295 arvif->is_started = false;
7296 ath10k_mac_vif_setup_ps(arvif);
7297
7298 err:
7299 mutex_unlock(&ar->conf_mutex);
7300 return ret;
7301 }
7302
7303 static void
7304 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7305 struct ieee80211_vif *vif,
7306 struct ieee80211_chanctx_conf *ctx)
7307 {
7308 struct ath10k *ar = hw->priv;
7309 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7310 int ret;
7311
7312 mutex_lock(&ar->conf_mutex);
7313
7314 ath10k_dbg(ar, ATH10K_DBG_MAC,
7315 "mac chanctx unassign ptr %p vdev_id %i\n",
7316 ctx, arvif->vdev_id);
7317
7318 WARN_ON(!arvif->is_started);
7319
7320 if (vif->type == NL80211_IFTYPE_MONITOR) {
7321 WARN_ON(!arvif->is_up);
7322
7323 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7324 if (ret)
7325 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7326 arvif->vdev_id, ret);
7327
7328 arvif->is_up = false;
7329 }
7330
7331 ret = ath10k_vdev_stop(arvif);
7332 if (ret)
7333 ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7334 arvif->vdev_id, ret);
7335
7336 arvif->is_started = false;
7337
7338 mutex_unlock(&ar->conf_mutex);
7339 }
7340
7341 static int
7342 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7343 struct ieee80211_vif_chanctx_switch *vifs,
7344 int n_vifs,
7345 enum ieee80211_chanctx_switch_mode mode)
7346 {
7347 struct ath10k *ar = hw->priv;
7348
7349 mutex_lock(&ar->conf_mutex);
7350
7351 ath10k_dbg(ar, ATH10K_DBG_MAC,
7352 "mac chanctx switch n_vifs %d mode %d\n",
7353 n_vifs, mode);
7354 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7355
7356 mutex_unlock(&ar->conf_mutex);
7357 return 0;
7358 }
7359
7360 static const struct ieee80211_ops ath10k_ops = {
7361 .tx = ath10k_mac_op_tx,
7362 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
7363 .start = ath10k_start,
7364 .stop = ath10k_stop,
7365 .config = ath10k_config,
7366 .add_interface = ath10k_add_interface,
7367 .remove_interface = ath10k_remove_interface,
7368 .configure_filter = ath10k_configure_filter,
7369 .bss_info_changed = ath10k_bss_info_changed,
7370 .hw_scan = ath10k_hw_scan,
7371 .cancel_hw_scan = ath10k_cancel_hw_scan,
7372 .set_key = ath10k_set_key,
7373 .set_default_unicast_key = ath10k_set_default_unicast_key,
7374 .sta_state = ath10k_sta_state,
7375 .conf_tx = ath10k_conf_tx,
7376 .remain_on_channel = ath10k_remain_on_channel,
7377 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
7378 .set_rts_threshold = ath10k_set_rts_threshold,
7379 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
7380 .flush = ath10k_flush,
7381 .tx_last_beacon = ath10k_tx_last_beacon,
7382 .set_antenna = ath10k_set_antenna,
7383 .get_antenna = ath10k_get_antenna,
7384 .reconfig_complete = ath10k_reconfig_complete,
7385 .get_survey = ath10k_get_survey,
7386 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
7387 .sta_rc_update = ath10k_sta_rc_update,
7388 .get_tsf = ath10k_get_tsf,
7389 .set_tsf = ath10k_set_tsf,
7390 .ampdu_action = ath10k_ampdu_action,
7391 .get_et_sset_count = ath10k_debug_get_et_sset_count,
7392 .get_et_stats = ath10k_debug_get_et_stats,
7393 .get_et_strings = ath10k_debug_get_et_strings,
7394 .add_chanctx = ath10k_mac_op_add_chanctx,
7395 .remove_chanctx = ath10k_mac_op_remove_chanctx,
7396 .change_chanctx = ath10k_mac_op_change_chanctx,
7397 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
7398 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
7399 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
7400
7401 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7402
7403 #ifdef CONFIG_PM
7404 .suspend = ath10k_wow_op_suspend,
7405 .resume = ath10k_wow_op_resume,
7406 #endif
7407 #ifdef CONFIG_MAC80211_DEBUGFS
7408 .sta_add_debugfs = ath10k_sta_add_debugfs,
7409 #endif
7410 };
7411
7412 #define CHAN2G(_channel, _freq, _flags) { \
7413 .band = NL80211_BAND_2GHZ, \
7414 .hw_value = (_channel), \
7415 .center_freq = (_freq), \
7416 .flags = (_flags), \
7417 .max_antenna_gain = 0, \
7418 .max_power = 30, \
7419 }
7420
7421 #define CHAN5G(_channel, _freq, _flags) { \
7422 .band = NL80211_BAND_5GHZ, \
7423 .hw_value = (_channel), \
7424 .center_freq = (_freq), \
7425 .flags = (_flags), \
7426 .max_antenna_gain = 0, \
7427 .max_power = 30, \
7428 }
7429
7430 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7431 CHAN2G(1, 2412, 0),
7432 CHAN2G(2, 2417, 0),
7433 CHAN2G(3, 2422, 0),
7434 CHAN2G(4, 2427, 0),
7435 CHAN2G(5, 2432, 0),
7436 CHAN2G(6, 2437, 0),
7437 CHAN2G(7, 2442, 0),
7438 CHAN2G(8, 2447, 0),
7439 CHAN2G(9, 2452, 0),
7440 CHAN2G(10, 2457, 0),
7441 CHAN2G(11, 2462, 0),
7442 CHAN2G(12, 2467, 0),
7443 CHAN2G(13, 2472, 0),
7444 CHAN2G(14, 2484, 0),
7445 };
7446
7447 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7448 CHAN5G(36, 5180, 0),
7449 CHAN5G(40, 5200, 0),
7450 CHAN5G(44, 5220, 0),
7451 CHAN5G(48, 5240, 0),
7452 CHAN5G(52, 5260, 0),
7453 CHAN5G(56, 5280, 0),
7454 CHAN5G(60, 5300, 0),
7455 CHAN5G(64, 5320, 0),
7456 CHAN5G(100, 5500, 0),
7457 CHAN5G(104, 5520, 0),
7458 CHAN5G(108, 5540, 0),
7459 CHAN5G(112, 5560, 0),
7460 CHAN5G(116, 5580, 0),
7461 CHAN5G(120, 5600, 0),
7462 CHAN5G(124, 5620, 0),
7463 CHAN5G(128, 5640, 0),
7464 CHAN5G(132, 5660, 0),
7465 CHAN5G(136, 5680, 0),
7466 CHAN5G(140, 5700, 0),
7467 CHAN5G(144, 5720, 0),
7468 CHAN5G(149, 5745, 0),
7469 CHAN5G(153, 5765, 0),
7470 CHAN5G(157, 5785, 0),
7471 CHAN5G(161, 5805, 0),
7472 CHAN5G(165, 5825, 0),
7473 };
7474
7475 struct ath10k *ath10k_mac_create(size_t priv_size)
7476 {
7477 struct ieee80211_hw *hw;
7478 struct ath10k *ar;
7479
7480 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
7481 if (!hw)
7482 return NULL;
7483
7484 ar = hw->priv;
7485 ar->hw = hw;
7486
7487 return ar;
7488 }
7489
7490 void ath10k_mac_destroy(struct ath10k *ar)
7491 {
7492 ieee80211_free_hw(ar->hw);
7493 }
7494
7495 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7496 {
7497 .max = 8,
7498 .types = BIT(NL80211_IFTYPE_STATION)
7499 | BIT(NL80211_IFTYPE_P2P_CLIENT)
7500 },
7501 {
7502 .max = 3,
7503 .types = BIT(NL80211_IFTYPE_P2P_GO)
7504 },
7505 {
7506 .max = 1,
7507 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
7508 },
7509 {
7510 .max = 7,
7511 .types = BIT(NL80211_IFTYPE_AP)
7512 #ifdef CONFIG_MAC80211_MESH
7513 | BIT(NL80211_IFTYPE_MESH_POINT)
7514 #endif
7515 },
7516 };
7517
7518 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7519 {
7520 .max = 8,
7521 .types = BIT(NL80211_IFTYPE_AP)
7522 #ifdef CONFIG_MAC80211_MESH
7523 | BIT(NL80211_IFTYPE_MESH_POINT)
7524 #endif
7525 },
7526 {
7527 .max = 1,
7528 .types = BIT(NL80211_IFTYPE_STATION)
7529 },
7530 };
7531
7532 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7533 {
7534 .limits = ath10k_if_limits,
7535 .n_limits = ARRAY_SIZE(ath10k_if_limits),
7536 .max_interfaces = 8,
7537 .num_different_channels = 1,
7538 .beacon_int_infra_match = true,
7539 },
7540 };
7541
7542 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7543 {
7544 .limits = ath10k_10x_if_limits,
7545 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7546 .max_interfaces = 8,
7547 .num_different_channels = 1,
7548 .beacon_int_infra_match = true,
7549 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7550 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7551 BIT(NL80211_CHAN_WIDTH_20) |
7552 BIT(NL80211_CHAN_WIDTH_40) |
7553 BIT(NL80211_CHAN_WIDTH_80),
7554 #endif
7555 },
7556 };
7557
7558 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7559 {
7560 .max = 2,
7561 .types = BIT(NL80211_IFTYPE_STATION),
7562 },
7563 {
7564 .max = 2,
7565 .types = BIT(NL80211_IFTYPE_AP) |
7566 #ifdef CONFIG_MAC80211_MESH
7567 BIT(NL80211_IFTYPE_MESH_POINT) |
7568 #endif
7569 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7570 BIT(NL80211_IFTYPE_P2P_GO),
7571 },
7572 {
7573 .max = 1,
7574 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7575 },
7576 };
7577
7578 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7579 {
7580 .max = 2,
7581 .types = BIT(NL80211_IFTYPE_STATION),
7582 },
7583 {
7584 .max = 2,
7585 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7586 },
7587 {
7588 .max = 1,
7589 .types = BIT(NL80211_IFTYPE_AP) |
7590 #ifdef CONFIG_MAC80211_MESH
7591 BIT(NL80211_IFTYPE_MESH_POINT) |
7592 #endif
7593 BIT(NL80211_IFTYPE_P2P_GO),
7594 },
7595 {
7596 .max = 1,
7597 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7598 },
7599 };
7600
7601 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7602 {
7603 .max = 1,
7604 .types = BIT(NL80211_IFTYPE_STATION),
7605 },
7606 {
7607 .max = 1,
7608 .types = BIT(NL80211_IFTYPE_ADHOC),
7609 },
7610 };
7611
7612 /* FIXME: This is not thoroughly tested. These combinations may over- or
7613 * underestimate hw/fw capabilities.
7614 */
7615 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7616 {
7617 .limits = ath10k_tlv_if_limit,
7618 .num_different_channels = 1,
7619 .max_interfaces = 4,
7620 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7621 },
7622 {
7623 .limits = ath10k_tlv_if_limit_ibss,
7624 .num_different_channels = 1,
7625 .max_interfaces = 2,
7626 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7627 },
7628 };
7629
7630 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7631 {
7632 .limits = ath10k_tlv_if_limit,
7633 .num_different_channels = 1,
7634 .max_interfaces = 4,
7635 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7636 },
7637 {
7638 .limits = ath10k_tlv_qcs_if_limit,
7639 .num_different_channels = 2,
7640 .max_interfaces = 4,
7641 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7642 },
7643 {
7644 .limits = ath10k_tlv_if_limit_ibss,
7645 .num_different_channels = 1,
7646 .max_interfaces = 2,
7647 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7648 },
7649 };
7650
7651 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7652 {
7653 .max = 1,
7654 .types = BIT(NL80211_IFTYPE_STATION),
7655 },
7656 {
7657 .max = 16,
7658 .types = BIT(NL80211_IFTYPE_AP)
7659 #ifdef CONFIG_MAC80211_MESH
7660 | BIT(NL80211_IFTYPE_MESH_POINT)
7661 #endif
7662 },
7663 };
7664
7665 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7666 {
7667 .limits = ath10k_10_4_if_limits,
7668 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7669 .max_interfaces = 16,
7670 .num_different_channels = 1,
7671 .beacon_int_infra_match = true,
7672 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7673 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7674 BIT(NL80211_CHAN_WIDTH_20) |
7675 BIT(NL80211_CHAN_WIDTH_40) |
7676 BIT(NL80211_CHAN_WIDTH_80),
7677 #endif
7678 },
7679 };
7680
7681 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7682 struct ieee80211_vif *vif)
7683 {
7684 struct ath10k_vif_iter *arvif_iter = data;
7685 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7686
7687 if (arvif->vdev_id == arvif_iter->vdev_id)
7688 arvif_iter->arvif = arvif;
7689 }
7690
7691 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7692 {
7693 struct ath10k_vif_iter arvif_iter;
7694 u32 flags;
7695
7696 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7697 arvif_iter.vdev_id = vdev_id;
7698
7699 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7700 ieee80211_iterate_active_interfaces_atomic(ar->hw,
7701 flags,
7702 ath10k_get_arvif_iter,
7703 &arvif_iter);
7704 if (!arvif_iter.arvif) {
7705 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
7706 return NULL;
7707 }
7708
7709 return arvif_iter.arvif;
7710 }
7711
7712 int ath10k_mac_register(struct ath10k *ar)
7713 {
7714 static const u32 cipher_suites[] = {
7715 WLAN_CIPHER_SUITE_WEP40,
7716 WLAN_CIPHER_SUITE_WEP104,
7717 WLAN_CIPHER_SUITE_TKIP,
7718 WLAN_CIPHER_SUITE_CCMP,
7719 WLAN_CIPHER_SUITE_AES_CMAC,
7720 };
7721 struct ieee80211_supported_band *band;
7722 void *channels;
7723 int ret;
7724
7725 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7726
7727 SET_IEEE80211_DEV(ar->hw, ar->dev);
7728
7729 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7730 ARRAY_SIZE(ath10k_5ghz_channels)) !=
7731 ATH10K_NUM_CHANS);
7732
7733 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7734 channels = kmemdup(ath10k_2ghz_channels,
7735 sizeof(ath10k_2ghz_channels),
7736 GFP_KERNEL);
7737 if (!channels) {
7738 ret = -ENOMEM;
7739 goto err_free;
7740 }
7741
7742 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7743 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7744 band->channels = channels;
7745
7746 if (ar->hw_params.cck_rate_map_rev2) {
7747 band->n_bitrates = ath10k_g_rates_rev2_size;
7748 band->bitrates = ath10k_g_rates_rev2;
7749 } else {
7750 band->n_bitrates = ath10k_g_rates_size;
7751 band->bitrates = ath10k_g_rates;
7752 }
7753
7754 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7755 }
7756
7757 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7758 channels = kmemdup(ath10k_5ghz_channels,
7759 sizeof(ath10k_5ghz_channels),
7760 GFP_KERNEL);
7761 if (!channels) {
7762 ret = -ENOMEM;
7763 goto err_free;
7764 }
7765
7766 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7767 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7768 band->channels = channels;
7769 band->n_bitrates = ath10k_a_rates_size;
7770 band->bitrates = ath10k_a_rates;
7771 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7772 }
7773
7774 ath10k_mac_setup_ht_vht_cap(ar);
7775
7776 ar->hw->wiphy->interface_modes =
7777 BIT(NL80211_IFTYPE_STATION) |
7778 BIT(NL80211_IFTYPE_AP) |
7779 BIT(NL80211_IFTYPE_MESH_POINT);
7780
7781 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7782 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7783
7784 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7785 ar->hw->wiphy->interface_modes |=
7786 BIT(NL80211_IFTYPE_P2P_DEVICE) |
7787 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7788 BIT(NL80211_IFTYPE_P2P_GO);
7789
7790 ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7791 ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7792 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7793 ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7794 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7795 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7796 ieee80211_hw_set(ar->hw, AP_LINK_PS);
7797 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
7798 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7799 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7800 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7801 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7802 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7803 ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
7804
7805 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7806 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
7807
7808 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
7809 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
7810
7811 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
7812 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
7813
7814 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
7815 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
7816 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
7817 }
7818
7819 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
7820 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
7821
7822 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
7823 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
7824 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
7825
7826 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
7827
7828 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
7829 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
7830
7831 /* Firmware delivers WPS/P2P Probe Request frames to the driver so
7832 * that userspace (e.g. wpa_supplicant/hostapd) can generate
7833 * correct Probe Responses. This is more of a hack than a real advertisement.
7834 */
7835 ar->hw->wiphy->probe_resp_offload |=
7836 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
7837 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
7838 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
7839 }
7840
7841 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
7842 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7843
7844 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
7845 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
7846 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
7847
7848 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
7849 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
7850 NL80211_FEATURE_AP_SCAN;
7851
7852 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
7853
7854 ret = ath10k_wow_init(ar);
7855 if (ret) {
7856 ath10k_warn(ar, "failed to init wow: %d\n", ret);
7857 goto err_free;
7858 }
7859
7860 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
7861
7862 /*
7863 * On LL (low-latency) hardware, queues are managed entirely by the FW,
7864 * so we only advertise to mac80211 that we support the maximum number of queues.
7865 */
7866 ar->hw->queues = IEEE80211_MAX_QUEUES;
7867
7868 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
7869 * something that vdev_ids can't reach so that we don't stop the queue
7870 * accidentally.
7871 */
7872 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
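/* Added for clarity: assuming IEEE80211_MAX_QUEUES == 16 (its mac80211 value
 * at the time of writing), this reserves queue 15 for offchannel tx, which is
 * expected to be above any valid vdev_id.
 */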
7873
7874 switch (ar->running_fw->fw_file.wmi_op_version) {
7875 case ATH10K_FW_WMI_OP_VERSION_MAIN:
7876 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
7877 ar->hw->wiphy->n_iface_combinations =
7878 ARRAY_SIZE(ath10k_if_comb);
7879 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7880 break;
7881 case ATH10K_FW_WMI_OP_VERSION_TLV:
7882 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
7883 ar->hw->wiphy->iface_combinations =
7884 ath10k_tlv_qcs_if_comb;
7885 ar->hw->wiphy->n_iface_combinations =
7886 ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
7887 } else {
7888 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
7889 ar->hw->wiphy->n_iface_combinations =
7890 ARRAY_SIZE(ath10k_tlv_if_comb);
7891 }
7892 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
7893 break;
7894 case ATH10K_FW_WMI_OP_VERSION_10_1:
7895 case ATH10K_FW_WMI_OP_VERSION_10_2:
7896 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
7897 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
7898 ar->hw->wiphy->n_iface_combinations =
7899 ARRAY_SIZE(ath10k_10x_if_comb);
7900 break;
7901 case ATH10K_FW_WMI_OP_VERSION_10_4:
7902 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
7903 ar->hw->wiphy->n_iface_combinations =
7904 ARRAY_SIZE(ath10k_10_4_if_comb);
7905 break;
7906 case ATH10K_FW_WMI_OP_VERSION_UNSET:
7907 case ATH10K_FW_WMI_OP_VERSION_MAX:
7908 WARN_ON(1);
7909 ret = -EINVAL;
7910 goto err_free;
7911 }
7912
7913 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
7914 ar->hw->netdev_features = NETIF_F_HW_CSUM;
7915
7916 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
7917 /* Init ath dfs pattern detector */
7918 ar->ath_common.debug_mask = ATH_DBG_DFS;
7919 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
7920 NL80211_DFS_UNSET);
7921
7922 if (!ar->dfs_detector)
7923 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
7924 }
7925
7926 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
7927 ath10k_reg_notifier);
7928 if (ret) {
7929 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
7930 goto err_dfs_detector_exit;
7931 }
7932
7933 ar->hw->wiphy->cipher_suites = cipher_suites;
7934 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
7935
7936 ret = ieee80211_register_hw(ar->hw);
7937 if (ret) {
7938 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
7939 goto err_dfs_detector_exit;
7940 }
7941
7942 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
7943 ret = regulatory_hint(ar->hw->wiphy,
7944 ar->ath_common.regulatory.alpha2);
7945 if (ret)
7946 goto err_unregister;
7947 }
7948
7949 return 0;
7950
7951 err_unregister:
7952 ieee80211_unregister_hw(ar->hw);
7953
7954 err_dfs_detector_exit:
7955 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7956 ar->dfs_detector->exit(ar->dfs_detector);
7957
7958 err_free:
7959 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7960 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7961
7962 SET_IEEE80211_DEV(ar->hw, NULL);
7963 return ret;
7964 }
7965
7966 void ath10k_mac_unregister(struct ath10k *ar)
7967 {
7968 ieee80211_unregister_hw(ar->hw);
7969
7970 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
7971 ar->dfs_detector->exit(ar->dfs_detector);
7972
7973 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
7974 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
7975
7976 SET_IEEE80211_DEV(ar->hw, NULL);
7977 }