1 /*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
50
51 static bool modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54
55 int modparam_noht;
56 module_param_named(noht, modparam_noht, int, S_IRUGO);
57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58
59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
60 .bitrate = (_bitrate), \
61 .flags = (_flags), \
62 .hw_value = (_hw_rate) | (_txpidx) << 4, \
63 }
64
65 struct ieee80211_rate __carl9170_ratetable[] = {
66 RATE(10, 0, 0, 0),
67 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
70 RATE(60, 0xb, 0, 0),
71 RATE(90, 0xf, 0, 0),
72 RATE(120, 0xa, 0, 0),
73 RATE(180, 0xe, 0, 0),
74 RATE(240, 0x9, 0, 0),
75 RATE(360, 0xd, 1, 0),
76 RATE(480, 0x8, 2, 0),
77 RATE(540, 0xc, 3, 0),
78 };
79 #undef RATE
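/*
 * For reference: RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE) above
 * expands to hw_value = 2 | (2 << 4) = 0x22, i.e. the hardware rate
 * code sits in the low nibble and the tx power index in the nibble
 * above it.
 */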
80
81 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
82 #define carl9170_g_ratetable_size 12
83 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
84 #define carl9170_a_ratetable_size 8
85
86 /*
87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88 * array in phy.c so that we don't have to do frequency lookups!
89 */
90 #define CHAN(_freq, _idx) { \
91 .center_freq = (_freq), \
92 .hw_value = (_idx), \
93 .max_power = 18, /* XXX */ \
94 }
95
96 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97 CHAN(2412, 0),
98 CHAN(2417, 1),
99 CHAN(2422, 2),
100 CHAN(2427, 3),
101 CHAN(2432, 4),
102 CHAN(2437, 5),
103 CHAN(2442, 6),
104 CHAN(2447, 7),
105 CHAN(2452, 8),
106 CHAN(2457, 9),
107 CHAN(2462, 10),
108 CHAN(2467, 11),
109 CHAN(2472, 12),
110 CHAN(2484, 13),
111 };
112
113 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114 CHAN(4920, 14),
115 CHAN(4940, 15),
116 CHAN(4960, 16),
117 CHAN(4980, 17),
118 CHAN(5040, 18),
119 CHAN(5060, 19),
120 CHAN(5080, 20),
121 CHAN(5180, 21),
122 CHAN(5200, 22),
123 CHAN(5220, 23),
124 CHAN(5240, 24),
125 CHAN(5260, 25),
126 CHAN(5280, 26),
127 CHAN(5300, 27),
128 CHAN(5320, 28),
129 CHAN(5500, 29),
130 CHAN(5520, 30),
131 CHAN(5540, 31),
132 CHAN(5560, 32),
133 CHAN(5580, 33),
134 CHAN(5600, 34),
135 CHAN(5620, 35),
136 CHAN(5640, 36),
137 CHAN(5660, 37),
138 CHAN(5680, 38),
139 CHAN(5700, 39),
140 CHAN(5745, 40),
141 CHAN(5765, 41),
142 CHAN(5785, 42),
143 CHAN(5805, 43),
144 CHAN(5825, 44),
145 CHAN(5170, 45),
146 CHAN(5190, 46),
147 CHAN(5210, 47),
148 CHAN(5230, 48),
149 };
150 #undef CHAN
151
152 #define CARL9170_HT_CAP \
153 { \
154 .ht_supported = true, \
155 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
156 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
157 IEEE80211_HT_CAP_SGI_40 | \
158 IEEE80211_HT_CAP_DSSSCCK40 | \
159 IEEE80211_HT_CAP_SM_PS, \
160 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
161 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
162 .mcs = { \
163 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
164 .rx_highest = cpu_to_le16(300), \
165 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
166 }, \
167 }
168
169 static struct ieee80211_supported_band carl9170_band_2GHz = {
170 .channels = carl9170_2ghz_chantable,
171 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
172 .bitrates = carl9170_g_ratetable,
173 .n_bitrates = carl9170_g_ratetable_size,
174 .ht_cap = CARL9170_HT_CAP,
175 };
176
177 static struct ieee80211_supported_band carl9170_band_5GHz = {
178 .channels = carl9170_5ghz_chantable,
179 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
180 .bitrates = carl9170_a_ratetable,
181 .n_bitrates = carl9170_a_ratetable_size,
182 .ht_cap = CARL9170_HT_CAP,
183 };
184
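/*
 * Garbage-collect TIDs that have reached the SHUTDOWN state: they are
 * unlinked from tx_ampdu_list under the list lock, marked KILLED, and
 * only freed after the synchronize_rcu() below, so that any concurrent
 * RCU readers of the list are done with them before kfree().
 */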
185 static void carl9170_ampdu_gc(struct ar9170 *ar)
186 {
187 struct carl9170_sta_tid *tid_info;
188 LIST_HEAD(tid_gc);
189
190 rcu_read_lock();
191 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192 spin_lock_bh(&ar->tx_ampdu_list_lock);
193 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194 tid_info->state = CARL9170_TID_STATE_KILLED;
195 list_del_rcu(&tid_info->list);
196 ar->tx_ampdu_list_len--;
197 list_add_tail(&tid_info->tmp_list, &tid_gc);
198 }
199 spin_unlock_bh(&ar->tx_ampdu_list_lock);
200
201 }
202 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203 rcu_read_unlock();
204
205 synchronize_rcu();
206
207 while (!list_empty(&tid_gc)) {
208 struct sk_buff *skb;
209 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210 tmp_list);
211
212 while ((skb = __skb_dequeue(&tid_info->queue)))
213 carl9170_tx_status(ar, skb, false);
214
215 list_del_init(&tid_info->tmp_list);
216 kfree(tid_info);
217 }
218 }
219
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 {
222 if (drop_queued) {
223 int i;
224
225 /*
226 * We can only drop frames which have not been uploaded
227 * to the device yet.
228 */
229
230 for (i = 0; i < ar->hw->queues; i++) {
231 struct sk_buff *skb;
232
233 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234 struct ieee80211_tx_info *info;
235
236 info = IEEE80211_SKB_CB(skb);
237 if (info->flags & IEEE80211_TX_CTL_AMPDU)
238 atomic_dec(&ar->tx_ampdu_upload);
239
240 carl9170_tx_status(ar, skb, false);
241 }
242 }
243 }
244
245 /* Wait for all other outstanding frames to timeout. */
246 if (atomic_read(&ar->tx_total_queued))
247 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
248 }
249
250 static void carl9170_flush_ba(struct ar9170 *ar)
251 {
252 struct sk_buff_head free;
253 struct carl9170_sta_tid *tid_info;
254 struct sk_buff *skb;
255
256 __skb_queue_head_init(&free);
257
258 rcu_read_lock();
259 spin_lock_bh(&ar->tx_ampdu_list_lock);
260 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
261 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
262 tid_info->state = CARL9170_TID_STATE_SUSPEND;
263
264 spin_lock(&tid_info->lock);
265 while ((skb = __skb_dequeue(&tid_info->queue)))
266 __skb_queue_tail(&free, skb);
267 spin_unlock(&tid_info->lock);
268 }
269 }
270 spin_unlock_bh(&ar->tx_ampdu_list_lock);
271 rcu_read_unlock();
272
273 while ((skb = __skb_dequeue(&free)))
274 carl9170_tx_status(ar, skb, false);
275 }
276
277 static void carl9170_zap_queues(struct ar9170 *ar)
278 {
279 struct carl9170_vif_info *cvif;
280 unsigned int i;
281
282 carl9170_ampdu_gc(ar);
283
284 carl9170_flush_ba(ar);
285 carl9170_flush(ar, true);
286
287 for (i = 0; i < ar->hw->queues; i++) {
288 spin_lock_bh(&ar->tx_status[i].lock);
289 while (!skb_queue_empty(&ar->tx_status[i])) {
290 struct sk_buff *skb;
291
292 skb = skb_peek(&ar->tx_status[i]);
293 carl9170_tx_get_skb(skb);
294 spin_unlock_bh(&ar->tx_status[i].lock);
295 carl9170_tx_drop(ar, skb);
296 spin_lock_bh(&ar->tx_status[i].lock);
297 carl9170_tx_put_skb(skb);
298 }
299 spin_unlock_bh(&ar->tx_status[i].lock);
300 }
301
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
304 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
305
306 /* reinitialize queue statistics */
307 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
308 for (i = 0; i < ar->hw->queues; i++)
309 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
310
311 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
312 ar->mem_bitmap[i] = 0;
313
314 rcu_read_lock();
315 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
316 spin_lock_bh(&ar->beacon_lock);
317 dev_kfree_skb_any(cvif->beacon);
318 cvif->beacon = NULL;
319 spin_unlock_bh(&ar->beacon_lock);
320 }
321 rcu_read_unlock();
322
323 atomic_set(&ar->tx_ampdu_upload, 0);
324 atomic_set(&ar->tx_ampdu_scheduler, 0);
325 atomic_set(&ar->tx_total_pending, 0);
326 atomic_set(&ar->tx_total_queued, 0);
327 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
328 }
329
330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
331 do { \
332 queue.aifs = ai_fs; \
333 queue.cw_min = cwmin; \
334 queue.cw_max = cwmax; \
335 queue.txop = _txop; \
336 } while (0)
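/*
 * The argument order is (AIFSN, CWmin, CWmax, TXOP). The defaults
 * programmed in carl9170_op_start() below appear to match the 802.11
 * EDCA parameter set for AC_VO/VI/BE/BK, with TXOP seemingly expressed
 * in 32 us units (47 * 32 us ~= 1.5 ms for AC_VO).
 */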
337
338 static int carl9170_op_start(struct ieee80211_hw *hw)
339 {
340 struct ar9170 *ar = hw->priv;
341 int err, i;
342
343 mutex_lock(&ar->mutex);
344
345 carl9170_zap_queues(ar);
346
347 /* reset QoS defaults */
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
352 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
353
354 ar->current_factor = ar->current_density = -1;
355 /* "The first key is unique." */
356 ar->usedkeys = 1;
357 ar->filter_state = 0;
358 ar->ps.last_action = jiffies;
359 ar->ps.last_slept = jiffies;
360 ar->erp_mode = CARL9170_ERP_AUTO;
361 ar->rx_software_decryption = false;
362 ar->disable_offload = false;
363
364 for (i = 0; i < ar->hw->queues; i++) {
365 ar->queue_stop_timeout[i] = jiffies;
366 ar->max_queue_stop_timeout[i] = 0;
367 }
368
369 atomic_set(&ar->mem_allocs, 0);
370
371 err = carl9170_usb_open(ar);
372 if (err)
373 goto out;
374
375 err = carl9170_init_mac(ar);
376 if (err)
377 goto out;
378
379 err = carl9170_set_qos(ar);
380 if (err)
381 goto out;
382
383 if (ar->fw.rx_filter) {
384 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
385 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
386 if (err)
387 goto out;
388 }
389
390 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
391 AR9170_DMA_TRIGGER_RXQ);
392 if (err)
393 goto out;
394
395 /* Clear key-cache */
396 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
397 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
398 0, NULL, 0);
399 if (err)
400 goto out;
401
402 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
403 1, NULL, 0);
404 if (err)
405 goto out;
406
407 if (i < AR9170_CAM_MAX_USER) {
408 err = carl9170_disable_key(ar, i);
409 if (err)
410 goto out;
411 }
412 }
413
414 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
415
416 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
417 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
418
419 ieee80211_wake_queues(ar->hw);
420 err = 0;
421
422 out:
423 mutex_unlock(&ar->mutex);
424 return err;
425 }
426
427 static void carl9170_cancel_worker(struct ar9170 *ar)
428 {
429 cancel_delayed_work_sync(&ar->stat_work);
430 cancel_delayed_work_sync(&ar->tx_janitor);
431 #ifdef CONFIG_CARL9170_LEDS
432 cancel_delayed_work_sync(&ar->led_work);
433 #endif /* CONFIG_CARL9170_LEDS */
434 cancel_work_sync(&ar->ps_work);
435 cancel_work_sync(&ar->ping_work);
436 cancel_work_sync(&ar->ampdu_work);
437 }
438
439 static void carl9170_op_stop(struct ieee80211_hw *hw)
440 {
441 struct ar9170 *ar = hw->priv;
442
443 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
444
445 ieee80211_stop_queues(ar->hw);
446
447 mutex_lock(&ar->mutex);
448 if (IS_ACCEPTING_CMD(ar)) {
449 RCU_INIT_POINTER(ar->beacon_iter, NULL);
450
451 carl9170_led_set_state(ar, 0);
452
453 /* stop DMA */
454 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
455 carl9170_usb_stop(ar);
456 }
457
458 carl9170_zap_queues(ar);
459 mutex_unlock(&ar->mutex);
460
461 carl9170_cancel_worker(ar);
462 }
463
464 static void carl9170_restart_work(struct work_struct *work)
465 {
466 struct ar9170 *ar = container_of(work, struct ar9170,
467 restart_work);
468 int err = -EIO;
469
470 ar->usedkeys = 0;
471 ar->filter_state = 0;
472 carl9170_cancel_worker(ar);
473
474 mutex_lock(&ar->mutex);
475 if (!ar->force_usb_reset) {
476 err = carl9170_usb_restart(ar);
477 if (net_ratelimit()) {
478 if (err)
479 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
480 else
481 dev_info(&ar->udev->dev, "device restarted successfully.\n");
482 }
483 }
484 carl9170_zap_queues(ar);
485 mutex_unlock(&ar->mutex);
486
487 if (!err && !ar->force_usb_reset) {
488 ar->restart_counter++;
489 atomic_set(&ar->pending_restarts, 0);
490
491 ieee80211_restart_hw(ar->hw);
492 } else {
493 /*
494 * The reset was unsuccessful and the device seems to
495 * be dead. But there's still one option: a low-level
496 * usb subsystem reset...
497 */
498
499 carl9170_usb_reset(ar);
500 }
501 }
502
503 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
504 {
505 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
506
507 /*
508 * Sometimes, an error can trigger several different reset events.
509 * By ignoring these *surplus* reset events, the device won't be
510 * killed again, right after it has recovered.
511 */
512 if (atomic_inc_return(&ar->pending_restarts) > 1) {
513 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
514 return;
515 }
516
517 ieee80211_stop_queues(ar->hw);
518
519 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
520
521 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
522 !WARN_ON(r >= __CARL9170_RR_LAST))
523 ar->last_reason = r;
524
525 if (!ar->registered)
526 return;
527
528 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
529 ar->force_usb_reset = true;
530
531 ieee80211_queue_work(ar->hw, &ar->restart_work);
532
533 /*
534 * At this point, the device instance might have vanished or been
535 * disabled. So, don't put any code here that accesses the ar9170
536 * struct without proper protection.
537 */
538 }
539
540 static void carl9170_ping_work(struct work_struct *work)
541 {
542 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
543 int err;
544
545 if (!IS_STARTED(ar))
546 return;
547
548 mutex_lock(&ar->mutex);
549 err = carl9170_echo_test(ar, 0xdeadbeef);
550 if (err)
551 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
552 mutex_unlock(&ar->mutex);
553 }
554
555 static int carl9170_init_interface(struct ar9170 *ar,
556 struct ieee80211_vif *vif)
557 {
558 struct ath_common *common = &ar->common;
559 int err;
560
561 if (!vif) {
562 WARN_ON_ONCE(IS_STARTED(ar));
563 return 0;
564 }
565
566 memcpy(common->macaddr, vif->addr, ETH_ALEN);
567
568 if (modparam_nohwcrypt ||
569 ((vif->type != NL80211_IFTYPE_STATION) &&
570 (vif->type != NL80211_IFTYPE_AP))) {
571 ar->rx_software_decryption = true;
572 ar->disable_offload = true;
573 }
574
575 err = carl9170_set_operating_mode(ar);
576 return err;
577 }
578
579 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
580 struct ieee80211_vif *vif)
581 {
582 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
583 struct ieee80211_vif *main_vif;
584 struct ar9170 *ar = hw->priv;
585 int vif_id = -1, err = 0;
586
587 mutex_lock(&ar->mutex);
588 rcu_read_lock();
589 if (vif_priv->active) {
590 /*
591 * Skip the interface structure initialization,
592 * if the vif survived the _restart call.
593 */
594 vif_id = vif_priv->id;
595 vif_priv->enable_beacon = false;
596
597 spin_lock_bh(&ar->beacon_lock);
598 dev_kfree_skb_any(vif_priv->beacon);
599 vif_priv->beacon = NULL;
600 spin_unlock_bh(&ar->beacon_lock);
601
602 goto init;
603 }
604
605 main_vif = carl9170_get_main_vif(ar);
606
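/*
 * Concurrency rules, as implemented below: with a STA main interface
 * only further STA interfaces are accepted; with an AP or mesh main
 * interface, additional STA, WDS, AP and mesh interfaces are allowed;
 * any other addition to a STA or AP/mesh main interface is rejected
 * with -EBUSY.
 */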
607 if (main_vif) {
608 switch (main_vif->type) {
609 case NL80211_IFTYPE_STATION:
610 if (vif->type == NL80211_IFTYPE_STATION)
611 break;
612
613 err = -EBUSY;
614 rcu_read_unlock();
615
616 goto unlock;
617
618 case NL80211_IFTYPE_MESH_POINT:
619 case NL80211_IFTYPE_AP:
620 if ((vif->type == NL80211_IFTYPE_STATION) ||
621 (vif->type == NL80211_IFTYPE_WDS) ||
622 (vif->type == NL80211_IFTYPE_AP) ||
623 (vif->type == NL80211_IFTYPE_MESH_POINT))
624 break;
625
626 err = -EBUSY;
627 rcu_read_unlock();
628 goto unlock;
629
630 default:
631 rcu_read_unlock();
632 goto unlock;
633 }
634 }
635
636 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
637
638 if (vif_id < 0) {
639 rcu_read_unlock();
640
641 err = -ENOSPC;
642 goto unlock;
643 }
644
645 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
646
647 vif_priv->active = true;
648 vif_priv->id = vif_id;
649 vif_priv->enable_beacon = false;
650 ar->vifs++;
651 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
652 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
653
654 init:
655 if (carl9170_get_main_vif(ar) == vif) {
656 rcu_assign_pointer(ar->beacon_iter, vif_priv);
657 rcu_read_unlock();
658
659 err = carl9170_init_interface(ar, vif);
660 if (err)
661 goto unlock;
662 } else {
663 rcu_read_unlock();
664 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
665
666 if (err)
667 goto unlock;
668 }
669
670 if (ar->fw.tx_seq_table) {
671 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
672 0);
673 if (err)
674 goto unlock;
675 }
676
677 unlock:
678 if (err && (vif_id >= 0)) {
679 vif_priv->active = false;
680 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
681 ar->vifs--;
682 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
683 list_del_rcu(&vif_priv->list);
684 mutex_unlock(&ar->mutex);
685 synchronize_rcu();
686 } else {
687 if (ar->vifs > 1)
688 ar->ps.off_override |= PS_OFF_VIF;
689
690 mutex_unlock(&ar->mutex);
691 }
692
693 return err;
694 }
695
696 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
697 struct ieee80211_vif *vif)
698 {
699 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
700 struct ieee80211_vif *main_vif;
701 struct ar9170 *ar = hw->priv;
702 unsigned int id;
703
704 mutex_lock(&ar->mutex);
705
706 if (WARN_ON_ONCE(!vif_priv->active))
707 goto unlock;
708
709 ar->vifs--;
710
711 rcu_read_lock();
712 main_vif = carl9170_get_main_vif(ar);
713
714 id = vif_priv->id;
715
716 vif_priv->active = false;
717 WARN_ON(vif_priv->enable_beacon);
718 vif_priv->enable_beacon = false;
719 list_del_rcu(&vif_priv->list);
720 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
721
722 if (vif == main_vif) {
723 rcu_read_unlock();
724
725 if (ar->vifs) {
726 WARN_ON(carl9170_init_interface(ar,
727 carl9170_get_main_vif(ar)));
728 } else {
729 carl9170_set_operating_mode(ar);
730 }
731 } else {
732 rcu_read_unlock();
733
734 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
735 }
736
737 carl9170_update_beacon(ar, false);
738 carl9170_flush_cab(ar, id);
739
740 spin_lock_bh(&ar->beacon_lock);
741 dev_kfree_skb_any(vif_priv->beacon);
742 vif_priv->beacon = NULL;
743 spin_unlock_bh(&ar->beacon_lock);
744
745 bitmap_release_region(&ar->vif_bitmap, id, 0);
746
747 carl9170_set_beacon_timers(ar);
748
749 if (ar->vifs == 1)
750 ar->ps.off_override &= ~PS_OFF_VIF;
751
752 unlock:
753 mutex_unlock(&ar->mutex);
754
755 synchronize_rcu();
756 }
757
758 void carl9170_ps_check(struct ar9170 *ar)
759 {
760 ieee80211_queue_work(ar->hw, &ar->ps_work);
761 }
762
763 /* caller must hold ar->mutex */
764 static int carl9170_ps_update(struct ar9170 *ar)
765 {
766 bool ps = false;
767 int err = 0;
768
769 if (!ar->ps.off_override)
770 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
771
772 if (ps != ar->ps.state) {
773 err = carl9170_powersave(ar, ps);
774 if (err)
775 return err;
776
777 if (ar->ps.state && !ps) {
778 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
779 ar->ps.last_action);
780 }
781
782 if (ps)
783 ar->ps.last_slept = jiffies;
784
785 ar->ps.last_action = jiffies;
786 ar->ps.state = ps;
787 }
788
789 return 0;
790 }
791
792 static void carl9170_ps_work(struct work_struct *work)
793 {
794 struct ar9170 *ar = container_of(work, struct ar9170,
795 ps_work);
796 mutex_lock(&ar->mutex);
797 if (IS_STARTED(ar))
798 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
799 mutex_unlock(&ar->mutex);
800 }
801
802 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
803 {
804 int err;
805
806 if (noise) {
807 err = carl9170_get_noisefloor(ar);
808 if (err)
809 return err;
810 }
811
812 if (ar->fw.hw_counters) {
813 err = carl9170_collect_tally(ar);
814 if (err)
815 return err;
816 }
817
818 if (flush)
819 memset(&ar->tally, 0, sizeof(ar->tally));
820
821 return 0;
822 }
823
824 static void carl9170_stat_work(struct work_struct *work)
825 {
826 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
827 int err;
828
829 mutex_lock(&ar->mutex);
830 err = carl9170_update_survey(ar, false, true);
831 mutex_unlock(&ar->mutex);
832
833 if (err)
834 return;
835
836 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
837 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
838 }
839
840 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
841 {
842 struct ar9170 *ar = hw->priv;
843 int err = 0;
844
845 mutex_lock(&ar->mutex);
846 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
847 /* TODO */
848 err = 0;
849 }
850
851 if (changed & IEEE80211_CONF_CHANGE_PS) {
852 err = carl9170_ps_update(ar);
853 if (err)
854 goto out;
855 }
856
857 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
858 /* TODO */
859 err = 0;
860 }
861
862 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
863 /* adjust slot time for 5 GHz */
864 err = carl9170_set_slot_time(ar);
865 if (err)
866 goto out;
867
868 err = carl9170_update_survey(ar, true, false);
869 if (err)
870 goto out;
871
872 err = carl9170_set_channel(ar, hw->conf.channel,
873 hw->conf.channel_type, CARL9170_RFI_NONE);
874 if (err)
875 goto out;
876
877 err = carl9170_update_survey(ar, false, true);
878 if (err)
879 goto out;
880
881 err = carl9170_set_dyn_sifs_ack(ar);
882 if (err)
883 goto out;
884
885 err = carl9170_set_rts_cts_rate(ar);
886 if (err)
887 goto out;
888 }
889
890 if (changed & IEEE80211_CONF_CHANGE_POWER) {
891 err = carl9170_set_mac_tpc(ar, ar->hw->conf.channel);
892 if (err)
893 goto out;
894 }
895
896 out:
897 mutex_unlock(&ar->mutex);
898 return err;
899 }
900
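/*
 * The multicast hash handed to the firmware is a 64-bit bitmap: each
 * address sets the bit selected by the top six bits of its last octet
 * (addr[5] >> 2). The broadcast address ends in 0xff and thus always
 * maps to bit 63, which is set unconditionally below.
 */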
901 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
902 struct netdev_hw_addr_list *mc_list)
903 {
904 struct netdev_hw_addr *ha;
905 u64 mchash;
906
907 /* always get broadcast frames */
908 mchash = 1ULL << (0xff >> 2);
909
910 netdev_hw_addr_list_for_each(ha, mc_list)
911 mchash |= 1ULL << (ha->addr[5] >> 2);
912
913 return mchash;
914 }
915
916 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
917 unsigned int changed_flags,
918 unsigned int *new_flags,
919 u64 multicast)
920 {
921 struct ar9170 *ar = hw->priv;
922
923 /* mask supported flags */
924 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
925
926 if (!IS_ACCEPTING_CMD(ar))
927 return;
928
929 mutex_lock(&ar->mutex);
930
931 ar->filter_state = *new_flags;
932 /*
933 * We could support more filter flags by setting the sniffer bit
934 * and then checking the error flags later.
935 */
936
937 if (*new_flags & FIF_ALLMULTI)
938 multicast = ~0ULL;
939
940 if (multicast != ar->cur_mc_hash)
941 WARN_ON(carl9170_update_multicast(ar, multicast));
942
943 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
944 ar->sniffer_enabled = !!(*new_flags &
945 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
946
947 WARN_ON(carl9170_set_operating_mode(ar));
948 }
949
950 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
951 u32 rx_filter = 0;
952
953 if (!ar->fw.ba_filter)
954 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
955
956 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
957 rx_filter |= CARL9170_RX_FILTER_BAD;
958
959 if (!(*new_flags & FIF_CONTROL))
960 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
961
962 if (!(*new_flags & FIF_PSPOLL))
963 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
964
965 if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
966 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
967 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
968 }
969
970 WARN_ON(carl9170_rx_filter(ar, rx_filter));
971 }
972
973 mutex_unlock(&ar->mutex);
974 }
975
976
977 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
978 struct ieee80211_vif *vif,
979 struct ieee80211_bss_conf *bss_conf,
980 u32 changed)
981 {
982 struct ar9170 *ar = hw->priv;
983 struct ath_common *common = &ar->common;
984 int err = 0;
985 struct carl9170_vif_info *vif_priv;
986 struct ieee80211_vif *main_vif;
987
988 mutex_lock(&ar->mutex);
989 vif_priv = (void *) vif->drv_priv;
990 main_vif = carl9170_get_main_vif(ar);
991 if (WARN_ON(!main_vif))
992 goto out;
993
994 if (changed & BSS_CHANGED_BEACON_ENABLED) {
995 struct carl9170_vif_info *iter;
996 int i = 0;
997
998 vif_priv->enable_beacon = bss_conf->enable_beacon;
999 rcu_read_lock();
1000 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1001 if (iter->active && iter->enable_beacon)
1002 i++;
1003
1004 }
1005 rcu_read_unlock();
1006
1007 ar->beacon_enabled = i;
1008 }
1009
1010 if (changed & BSS_CHANGED_BEACON) {
1011 err = carl9170_update_beacon(ar, false);
1012 if (err)
1013 goto out;
1014 }
1015
1016 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1017 BSS_CHANGED_BEACON_INT)) {
1018
1019 if (main_vif != vif) {
1020 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1021 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1022 }
1023
1024 /*
1025 * Buffered broadcast/CAB traffic is held back until the DTIM beacon,
1026 * so a hard limit on the DTIM interval should prevent false
1027 * queue-stuck alarms.
1027 */
1028 if (vif->type != NL80211_IFTYPE_STATION &&
1029 (bss_conf->beacon_int * bss_conf->dtim_period >=
1030 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1031 err = -EINVAL;
1032 goto out;
1033 }
1034
1035 err = carl9170_set_beacon_timers(ar);
1036 if (err)
1037 goto out;
1038 }
1039
1040 if (changed & BSS_CHANGED_HT) {
1041 /* TODO */
1042 err = 0;
1043 if (err)
1044 goto out;
1045 }
1046
1047 if (main_vif != vif)
1048 goto out;
1049
1050 /*
1051 * The following settings can only be changed by the
1052 * master interface.
1053 */
1054
1055 if (changed & BSS_CHANGED_BSSID) {
1056 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1057 err = carl9170_set_operating_mode(ar);
1058 if (err)
1059 goto out;
1060 }
1061
1062 if (changed & BSS_CHANGED_ASSOC) {
1063 ar->common.curaid = bss_conf->aid;
1064 err = carl9170_set_beacon_timers(ar);
1065 if (err)
1066 goto out;
1067 }
1068
1069 if (changed & BSS_CHANGED_ERP_SLOT) {
1070 err = carl9170_set_slot_time(ar);
1071 if (err)
1072 goto out;
1073 }
1074
1075 if (changed & BSS_CHANGED_BASIC_RATES) {
1076 err = carl9170_set_mac_rates(ar);
1077 if (err)
1078 goto out;
1079 }
1080
1081 out:
1082 WARN_ON_ONCE(err && IS_STARTED(ar));
1083 mutex_unlock(&ar->mutex);
1084 }
1085
1086 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1087 struct ieee80211_vif *vif)
1088 {
1089 struct ar9170 *ar = hw->priv;
1090 struct carl9170_tsf_rsp tsf;
1091 int err;
1092
1093 mutex_lock(&ar->mutex);
1094 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1095 0, NULL, sizeof(tsf), &tsf);
1096 mutex_unlock(&ar->mutex);
1097 if (WARN_ON(err))
1098 return 0;
1099
1100 return le64_to_cpu(tsf.tsf_64);
1101 }
1102
1103 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1104 struct ieee80211_vif *vif,
1105 struct ieee80211_sta *sta,
1106 struct ieee80211_key_conf *key)
1107 {
1108 struct ar9170 *ar = hw->priv;
1109 int err = 0, i;
1110 u8 ktype;
1111
1112 if (ar->disable_offload || !vif)
1113 return -EOPNOTSUPP;
1114
1115 /*
1116 * We have to fall back to software encryption whenever
1117 * the user chooses to participate in an IBSS or is connected
1118 * to more than one network.
1119 *
1120 * This is very unfortunate, because some machines cannot handle
1121 * the high throughput speeds of 802.11n networks.
1122 */
1123
1124 if (!is_main_vif(ar, vif)) {
1125 mutex_lock(&ar->mutex);
1126 goto err_softw;
1127 }
1128
1129 /*
1130 * While the hardware supports a *catch-all* key for offloading
1131 * group-key en-/de-cryption, the way the hardware decides which
1132 * keyId maps to which key remains a mystery...
1133 */
1134 if ((vif->type != NL80211_IFTYPE_STATION &&
1135 vif->type != NL80211_IFTYPE_ADHOC) &&
1136 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1137 return -EOPNOTSUPP;
1138
1139 switch (key->cipher) {
1140 case WLAN_CIPHER_SUITE_WEP40:
1141 ktype = AR9170_ENC_ALG_WEP64;
1142 break;
1143 case WLAN_CIPHER_SUITE_WEP104:
1144 ktype = AR9170_ENC_ALG_WEP128;
1145 break;
1146 case WLAN_CIPHER_SUITE_TKIP:
1147 ktype = AR9170_ENC_ALG_TKIP;
1148 break;
1149 case WLAN_CIPHER_SUITE_CCMP:
1150 ktype = AR9170_ENC_ALG_AESCCMP;
1151 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1152 break;
1153 default:
1154 return -EOPNOTSUPP;
1155 }
1156
1157 mutex_lock(&ar->mutex);
1158 if (cmd == SET_KEY) {
1159 if (!IS_STARTED(ar)) {
1160 err = -EOPNOTSUPP;
1161 goto out;
1162 }
1163
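/*
 * Key-cache layout used here: pairwise keys go into slots 0-63,
 * tracked via the ar->usedkeys bitmap, while group keys use the
 * fixed slots 64 + keyidx.
 */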
1164 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1165 sta = NULL;
1166
1167 i = 64 + key->keyidx;
1168 } else {
1169 for (i = 0; i < 64; i++)
1170 if (!(ar->usedkeys & BIT(i)))
1171 break;
1172 if (i == 64)
1173 goto err_softw;
1174 }
1175
1176 key->hw_key_idx = i;
1177
1178 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1179 ktype, 0, key->key,
1180 min_t(u8, 16, key->keylen));
1181 if (err)
1182 goto out;
1183
1184 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1185 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1186 NULL, ktype, 1,
1187 key->key + 16, 16);
1188 if (err)
1189 goto out;
1190
1191 /*
1192 * the hardware is not capable of generating the MMIC
1193 * for fragmented frames!
1194 */
1195 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1196 }
1197
1198 if (i < 64)
1199 ar->usedkeys |= BIT(i);
1200
1201 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1202 } else {
1203 if (!IS_STARTED(ar)) {
1204 /* The device is gone... together with the key ;-) */
1205 err = 0;
1206 goto out;
1207 }
1208
1209 if (key->hw_key_idx < 64) {
1210 ar->usedkeys &= ~BIT(key->hw_key_idx);
1211 } else {
1212 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1213 AR9170_ENC_ALG_NONE, 0,
1214 NULL, 0);
1215 if (err)
1216 goto out;
1217
1218 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1219 err = carl9170_upload_key(ar, key->hw_key_idx,
1220 NULL,
1221 AR9170_ENC_ALG_NONE,
1222 1, NULL, 0);
1223 if (err)
1224 goto out;
1225 }
1226
1227 }
1228
1229 err = carl9170_disable_key(ar, key->hw_key_idx);
1230 if (err)
1231 goto out;
1232 }
1233
1234 out:
1235 mutex_unlock(&ar->mutex);
1236 return err;
1237
1238 err_softw:
1239 if (!ar->rx_software_decryption) {
1240 ar->rx_software_decryption = true;
1241 carl9170_set_operating_mode(ar);
1242 }
1243 mutex_unlock(&ar->mutex);
1244 return -ENOSPC;
1245 }
1246
1247 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1248 struct ieee80211_vif *vif,
1249 struct ieee80211_sta *sta)
1250 {
1251 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1252 unsigned int i;
1253
1254 atomic_set(&sta_info->pending_frames, 0);
1255
1256 if (sta->ht_cap.ht_supported) {
1257 if (sta->ht_cap.ampdu_density > 6) {
1258 /*
1259 * The HW does not support a 16us AMPDU density;
1260 * no HT-Xmit for this station.
1261 */
1262
1263 return 0;
1264 }
1265
1266 for (i = 0; i < CARL9170_NUM_TID; i++)
1267 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1268
1269 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1270 sta_info->ht_sta = true;
1271 }
1272
1273 return 0;
1274 }
1275
1276 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1277 struct ieee80211_vif *vif,
1278 struct ieee80211_sta *sta)
1279 {
1280 struct ar9170 *ar = hw->priv;
1281 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1282 unsigned int i;
1283 bool cleanup = false;
1284
1285 if (sta->ht_cap.ht_supported) {
1286
1287 sta_info->ht_sta = false;
1288
1289 rcu_read_lock();
1290 for (i = 0; i < CARL9170_NUM_TID; i++) {
1291 struct carl9170_sta_tid *tid_info;
1292
1293 tid_info = rcu_dereference(sta_info->agg[i]);
1294 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1295
1296 if (!tid_info)
1297 continue;
1298
1299 spin_lock_bh(&ar->tx_ampdu_list_lock);
1300 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1301 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1302 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1303 cleanup = true;
1304 }
1305 rcu_read_unlock();
1306
1307 if (cleanup)
1308 carl9170_ampdu_gc(ar);
1309 }
1310
1311 return 0;
1312 }
1313
1314 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1315 struct ieee80211_vif *vif, u16 queue,
1316 const struct ieee80211_tx_queue_params *param)
1317 {
1318 struct ar9170 *ar = hw->priv;
1319 int ret;
1320
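/*
 * mac80211 queue numbers are remapped through ar9170_qmap[] onto the
 * hardware EDCF ordering before the parameters are stored and applied.
 */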
1321 mutex_lock(&ar->mutex);
1322 if (queue < ar->hw->queues) {
1323 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1324 ret = carl9170_set_qos(ar);
1325 } else {
1326 ret = -EINVAL;
1327 }
1328
1329 mutex_unlock(&ar->mutex);
1330 return ret;
1331 }
1332
1333 static void carl9170_ampdu_work(struct work_struct *work)
1334 {
1335 struct ar9170 *ar = container_of(work, struct ar9170,
1336 ampdu_work);
1337
1338 if (!IS_STARTED(ar))
1339 return;
1340
1341 mutex_lock(&ar->mutex);
1342 carl9170_ampdu_gc(ar);
1343 mutex_unlock(&ar->mutex);
1344 }
1345
1346 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1347 struct ieee80211_vif *vif,
1348 enum ieee80211_ampdu_mlme_action action,
1349 struct ieee80211_sta *sta,
1350 u16 tid, u16 *ssn, u8 buf_size)
1351 {
1352 struct ar9170 *ar = hw->priv;
1353 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1354 struct carl9170_sta_tid *tid_info;
1355
1356 if (modparam_noht)
1357 return -EOPNOTSUPP;
1358
1359 switch (action) {
1360 case IEEE80211_AMPDU_TX_START:
1361 if (!sta_info->ht_sta)
1362 return -EOPNOTSUPP;
1363
1364 rcu_read_lock();
1365 if (rcu_dereference(sta_info->agg[tid])) {
1366 rcu_read_unlock();
1367 return -EBUSY;
1368 }
1369
1370 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1371 GFP_ATOMIC);
1372 if (!tid_info) {
1373 rcu_read_unlock();
1374 return -ENOMEM;
1375 }
1376
1377 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1378 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1379 tid_info->tid = tid;
1380 tid_info->max = sta_info->ampdu_max_len;
1381
1382 INIT_LIST_HEAD(&tid_info->list);
1383 INIT_LIST_HEAD(&tid_info->tmp_list);
1384 skb_queue_head_init(&tid_info->queue);
1385 spin_lock_init(&tid_info->lock);
1386
1387 spin_lock_bh(&ar->tx_ampdu_list_lock);
1388 ar->tx_ampdu_list_len++;
1389 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1390 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1391 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1392 rcu_read_unlock();
1393
1394 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1395 break;
1396
1397 case IEEE80211_AMPDU_TX_STOP_CONT:
1398 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1399 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1400 rcu_read_lock();
1401 tid_info = rcu_dereference(sta_info->agg[tid]);
1402 if (tid_info) {
1403 spin_lock_bh(&ar->tx_ampdu_list_lock);
1404 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1405 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1406 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1407 }
1408
1409 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1410 rcu_read_unlock();
1411
1412 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1413 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1414 break;
1415
1416 case IEEE80211_AMPDU_TX_OPERATIONAL:
1417 rcu_read_lock();
1418 tid_info = rcu_dereference(sta_info->agg[tid]);
1419
1420 sta_info->stats[tid].clear = true;
1421 sta_info->stats[tid].req = false;
1422
1423 if (tid_info) {
1424 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1425 tid_info->state = CARL9170_TID_STATE_IDLE;
1426 }
1427 rcu_read_unlock();
1428
1429 if (WARN_ON_ONCE(!tid_info))
1430 return -EFAULT;
1431
1432 break;
1433
1434 case IEEE80211_AMPDU_RX_START:
1435 case IEEE80211_AMPDU_RX_STOP:
1436 /* Handled by hardware */
1437 break;
1438
1439 default:
1440 return -EOPNOTSUPP;
1441 }
1442
1443 return 0;
1444 }
1445
1446 #ifdef CONFIG_CARL9170_WPC
1447 static int carl9170_register_wps_button(struct ar9170 *ar)
1448 {
1449 struct input_dev *input;
1450 int err;
1451
1452 if (!(ar->features & CARL9170_WPS_BUTTON))
1453 return 0;
1454
1455 input = input_allocate_device();
1456 if (!input)
1457 return -ENOMEM;
1458
1459 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1460 wiphy_name(ar->hw->wiphy));
1461
1462 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1463 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1464
1465 input->name = ar->wps.name;
1466 input->phys = ar->wps.phys;
1467 input->id.bustype = BUS_USB;
1468 input->dev.parent = &ar->hw->wiphy->dev;
1469
1470 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1471
1472 err = input_register_device(input);
1473 if (err) {
1474 input_free_device(input);
1475 return err;
1476 }
1477
1478 ar->wps.pbc = input;
1479 return 0;
1480 }
1481 #endif /* CONFIG_CARL9170_WPC */
1482
1483 #ifdef CONFIG_CARL9170_HWRNG
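/*
 * Fill the RNG cache with a single bulk command: rng_load below is just
 * RW copies of the AR9170_RAND_REG_NUM register address, so one
 * CARL9170_CMD_RREG round trip returns RW random samples at once.
 */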
1484 static int carl9170_rng_get(struct ar9170 *ar)
1485 {
1486
1487 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1488 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1489
1490 static const __le32 rng_load[RW] = {
1491 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1492
1493 u32 buf[RW];
1494
1495 unsigned int i, off = 0, transfer, count;
1496 int err;
1497
1498 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1499
1500 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1501 return -EAGAIN;
1502
1503 count = ARRAY_SIZE(ar->rng.cache);
1504 while (count) {
1505 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1506 RB, (u8 *) rng_load,
1507 RB, (u8 *) buf);
1508 if (err)
1509 return err;
1510
1511 transfer = min_t(unsigned int, count, RW);
1512 for (i = 0; i < transfer; i++)
1513 ar->rng.cache[off + i] = buf[i];
1514
1515 off += transfer;
1516 count -= transfer;
1517 }
1518
1519 ar->rng.cache_idx = 0;
1520
1521 #undef RW
1522 #undef RB
1523 return 0;
1524 }
1525
1526 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1527 {
1528 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1529 int ret = -EIO;
1530
1531 mutex_lock(&ar->mutex);
1532 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1533 ret = carl9170_rng_get(ar);
1534 if (ret) {
1535 mutex_unlock(&ar->mutex);
1536 return ret;
1537 }
1538 }
1539
1540 *data = ar->rng.cache[ar->rng.cache_idx++];
1541 mutex_unlock(&ar->mutex);
1542
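/*
 * Only 16 bits of *data were filled in from the cache (the cache
 * presumably holds u16 samples), hence the sizeof(u16) return value.
 */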
1543 return sizeof(u16);
1544 }
1545
1546 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1547 {
1548 if (ar->rng.initialized) {
1549 hwrng_unregister(&ar->rng.rng);
1550 ar->rng.initialized = false;
1551 }
1552 }
1553
1554 static int carl9170_register_hwrng(struct ar9170 *ar)
1555 {
1556 int err;
1557
1558 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1559 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1560 ar->rng.rng.name = ar->rng.name;
1561 ar->rng.rng.data_read = carl9170_rng_read;
1562 ar->rng.rng.priv = (unsigned long)ar;
1563
1564 if (WARN_ON(ar->rng.initialized))
1565 return -EALREADY;
1566
1567 err = hwrng_register(&ar->rng.rng);
1568 if (err) {
1569 dev_err(&ar->udev->dev, "Failed to register the random "
1570 "number generator (%d)\n", err);
1571 return err;
1572 }
1573
1574 ar->rng.initialized = true;
1575
1576 err = carl9170_rng_get(ar);
1577 if (err) {
1578 carl9170_unregister_hwrng(ar);
1579 return err;
1580 }
1581
1582 return 0;
1583 }
1584 #endif /* CONFIG_CARL9170_HWRNG */
1585
1586 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1587 struct survey_info *survey)
1588 {
1589 struct ar9170 *ar = hw->priv;
1590 struct ieee80211_channel *chan;
1591 struct ieee80211_supported_band *band;
1592 int err, b, i;
1593
1594 chan = ar->channel;
1595 if (!chan)
1596 return -ENODEV;
1597
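/*
 * The survey array is indexed by the channel's hw_value, which (per the
 * CHAN() tables above) is simply the channel's position in the combined
 * 2 GHz + 5 GHz list, so no frequency lookup is needed here.
 */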
1598 if (idx == chan->hw_value) {
1599 mutex_lock(&ar->mutex);
1600 err = carl9170_update_survey(ar, false, true);
1601 mutex_unlock(&ar->mutex);
1602 if (err)
1603 return err;
1604 }
1605
1606 for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1607 band = ar->hw->wiphy->bands[b];
1608
1609 if (!band)
1610 continue;
1611
1612 for (i = 0; i < band->n_channels; i++) {
1613 if (band->channels[i].hw_value == idx) {
1614 chan = &band->channels[i];
1615 goto found;
1616 }
1617 }
1618 }
1619 return -ENOENT;
1620
1621 found:
1622 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1623
1624 survey->channel = chan;
1625 survey->filled = SURVEY_INFO_NOISE_DBM;
1626
1627 if (ar->channel == chan)
1628 survey->filled |= SURVEY_INFO_IN_USE;
1629
1630 if (ar->fw.hw_counters) {
1631 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1632 SURVEY_INFO_CHANNEL_TIME_BUSY |
1633 SURVEY_INFO_CHANNEL_TIME_TX;
1634 }
1635
1636 return 0;
1637 }
1638
1639 static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1640 {
1641 struct ar9170 *ar = hw->priv;
1642 unsigned int vid;
1643
1644 mutex_lock(&ar->mutex);
1645 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1646 carl9170_flush_cab(ar, vid);
1647
1648 carl9170_flush(ar, drop);
1649 mutex_unlock(&ar->mutex);
1650 }
1651
1652 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1653 struct ieee80211_low_level_stats *stats)
1654 {
1655 struct ar9170 *ar = hw->priv;
1656
1657 memset(stats, 0, sizeof(*stats));
1658 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1659 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1660 return 0;
1661 }
1662
1663 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1664 struct ieee80211_vif *vif,
1665 enum sta_notify_cmd cmd,
1666 struct ieee80211_sta *sta)
1667 {
1668 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1669
1670 switch (cmd) {
1671 case STA_NOTIFY_SLEEP:
1672 sta_info->sleeping = true;
1673 if (atomic_read(&sta_info->pending_frames))
1674 ieee80211_sta_block_awake(hw, sta, true);
1675 break;
1676
1677 case STA_NOTIFY_AWAKE:
1678 sta_info->sleeping = false;
1679 break;
1680 }
1681 }
1682
1683 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1684 {
1685 struct ar9170 *ar = hw->priv;
1686
1687 return !!atomic_read(&ar->tx_total_queued);
1688 }
1689
1690 static const struct ieee80211_ops carl9170_ops = {
1691 .start = carl9170_op_start,
1692 .stop = carl9170_op_stop,
1693 .tx = carl9170_op_tx,
1694 .flush = carl9170_op_flush,
1695 .add_interface = carl9170_op_add_interface,
1696 .remove_interface = carl9170_op_remove_interface,
1697 .config = carl9170_op_config,
1698 .prepare_multicast = carl9170_op_prepare_multicast,
1699 .configure_filter = carl9170_op_configure_filter,
1700 .conf_tx = carl9170_op_conf_tx,
1701 .bss_info_changed = carl9170_op_bss_info_changed,
1702 .get_tsf = carl9170_op_get_tsf,
1703 .set_key = carl9170_op_set_key,
1704 .sta_add = carl9170_op_sta_add,
1705 .sta_remove = carl9170_op_sta_remove,
1706 .sta_notify = carl9170_op_sta_notify,
1707 .get_survey = carl9170_op_get_survey,
1708 .get_stats = carl9170_op_get_stats,
1709 .ampdu_action = carl9170_op_ampdu_action,
1710 .tx_frames_pending = carl9170_tx_frames_pending,
1711 };
1712
1713 void *carl9170_alloc(size_t priv_size)
1714 {
1715 struct ieee80211_hw *hw;
1716 struct ar9170 *ar;
1717 struct sk_buff *skb;
1718 int i;
1719
1720 /*
1721 * this buffer is used for rx stream reconstruction.
1722 * Under heavy load this device (or the transport layer?)
1723 * tends to split the streams into separate rx descriptors.
1724 */
1725
1726 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1727 if (!skb)
1728 goto err_nomem;
1729
1730 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1731 if (!hw)
1732 goto err_nomem;
1733
1734 ar = hw->priv;
1735 ar->hw = hw;
1736 ar->rx_failover = skb;
1737
1738 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1739 ar->rx_has_plcp = false;
1740
1741 /*
1742 * Here's a hidden pitfall!
1743 *
1744 * All 4 AC queues work perfectly well under _legacy_ operation.
1745 * However, as soon as aggregation is enabled, the traffic flow
1746 * gets very bumpy. Therefore we have to _switch_ to a
1747 * software AC with a single HW queue.
1748 */
1749 hw->queues = __AR9170_NUM_TXQ;
1750
1751 mutex_init(&ar->mutex);
1752 spin_lock_init(&ar->beacon_lock);
1753 spin_lock_init(&ar->cmd_lock);
1754 spin_lock_init(&ar->tx_stats_lock);
1755 spin_lock_init(&ar->tx_ampdu_list_lock);
1756 spin_lock_init(&ar->mem_lock);
1757 spin_lock_init(&ar->state_lock);
1758 atomic_set(&ar->pending_restarts, 0);
1759 ar->vifs = 0;
1760 for (i = 0; i < ar->hw->queues; i++) {
1761 skb_queue_head_init(&ar->tx_status[i]);
1762 skb_queue_head_init(&ar->tx_pending[i]);
1763
1764 INIT_LIST_HEAD(&ar->bar_list[i]);
1765 spin_lock_init(&ar->bar_list_lock[i]);
1766 }
1767 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1768 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1769 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1770 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1771 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1772 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1773 INIT_LIST_HEAD(&ar->tx_ampdu_list);
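/*
 * The AMPDU round-robin iterator is primed with the list head itself,
 * cast to the entry type, presumably so the tx-side iteration always
 * starts from a valid pointer even while the list is still empty.
 */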
1774 rcu_assign_pointer(ar->tx_ampdu_iter,
1775 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1776
1777 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1778 INIT_LIST_HEAD(&ar->vif_list);
1779 init_completion(&ar->tx_flush);
1780
1781 /* firmware decides which modes we support */
1782 hw->wiphy->interface_modes = 0;
1783
1784 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1785 IEEE80211_HW_MFP_CAPABLE |
1786 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1787 IEEE80211_HW_SUPPORTS_PS |
1788 IEEE80211_HW_PS_NULLFUNC_STACK |
1789 IEEE80211_HW_NEED_DTIM_PERIOD |
1790 IEEE80211_HW_SIGNAL_DBM;
1791
1792 if (!modparam_noht) {
1793 /*
1794 * see the comment above for why we allow the user
1795 * to disable HT via a module parameter.
1796 */
1797 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1798 }
1799
1800 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1801 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1802 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1803
1804 hw->max_rates = CARL9170_TX_MAX_RATES;
1805 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1806
1807 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1808 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1809
1810 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1811
1812 /* As IBSS Encryption is software-based, IBSS RSN is supported. */
1813 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
1814 return ar;
1815
1816 err_nomem:
1817 kfree_skb(skb);
1818 return ERR_PTR(-ENOMEM);
1819 }
1820
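/*
 * The EEPROM image is fetched in RB-byte chunks: each CARL9170_CMD_RREG
 * command carries a list of RW register offsets and returns the RW
 * corresponding 32-bit words, which land directly in ar->eeprom.
 */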
1821 static int carl9170_read_eeprom(struct ar9170 *ar)
1822 {
1823 #define RW 8 /* number of words to read at once */
1824 #define RB (sizeof(u32) * RW)
1825 u8 *eeprom = (void *)&ar->eeprom;
1826 __le32 offsets[RW];
1827 int i, j, err;
1828
1829 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1830
1831 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1832 #ifndef __CHECKER__
1833 /* don't want to handle trailing remains */
1834 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1835 #endif
1836
1837 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1838 for (j = 0; j < RW; j++)
1839 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1840 RB * i + 4 * j);
1841
1842 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1843 RB, (u8 *) &offsets,
1844 RB, eeprom + RB * i);
1845 if (err)
1846 return err;
1847 }
1848
1849 #undef RW
1850 #undef RB
1851 return 0;
1852 }
1853
1854 static int carl9170_parse_eeprom(struct ar9170 *ar)
1855 {
1856 struct ath_regulatory *regulatory = &ar->common.regulatory;
1857 unsigned int rx_streams, tx_streams, tx_params = 0;
1858 int bands = 0;
1859 int chans = 0;
1860
1861 if (ar->eeprom.length == cpu_to_le16(0xffff))
1862 return -ENODATA;
1863
1864 rx_streams = hweight8(ar->eeprom.rx_mask);
1865 tx_streams = hweight8(ar->eeprom.tx_mask);
1866
1867 if (rx_streams != tx_streams) {
1868 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1869
1870 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1871 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1872
1873 tx_params |= (tx_streams - 1) <<
1874 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1875
1876 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1877 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1878 }
1879
1880 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1881 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1882 &carl9170_band_2GHz;
1883 chans += carl9170_band_2GHz.n_channels;
1884 bands++;
1885 }
1886 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1887 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1888 &carl9170_band_5GHz;
1889 chans += carl9170_band_5GHz.n_channels;
1890 bands++;
1891 }
1892
1893 if (!bands)
1894 return -EINVAL;
1895
1896 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1897 if (!ar->survey)
1898 return -ENOMEM;
1899 ar->num_channels = chans;
1900
1901 /*
1902 * I measured this: a band switch takes roughly
1903 * 135 ms and a frequency switch about 80 ms.
1904 *
1905 * FIXME: measure these values again once EEPROM settings
1906 * are used, that will influence them!
1907 */
1908 if (bands == 2)
1909 ar->hw->channel_change_time = 135 * 1000;
1910 else
1911 ar->hw->channel_change_time = 80 * 1000;
1912
1913 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1914
1915 /* second part of wiphy init */
1916 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1917
1918 return 0;
1919 }
1920
1921 static int carl9170_reg_notifier(struct wiphy *wiphy,
1922 struct regulatory_request *request)
1923 {
1924 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1925 struct ar9170 *ar = hw->priv;
1926
1927 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1928 }
1929
1930 int carl9170_register(struct ar9170 *ar)
1931 {
1932 struct ath_regulatory *regulatory = &ar->common.regulatory;
1933 int err = 0, i;
1934
1935 if (WARN_ON(ar->mem_bitmap))
1936 return -EINVAL;
1937
1938 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
1939 sizeof(unsigned long), GFP_KERNEL);
1940
1941 if (!ar->mem_bitmap)
1942 return -ENOMEM;
1943
1944 /* try to read EEPROM, init MAC addr */
1945 err = carl9170_read_eeprom(ar);
1946 if (err)
1947 return err;
1948
1949 err = carl9170_parse_eeprom(ar);
1950 if (err)
1951 return err;
1952
1953 err = ath_regd_init(regulatory, ar->hw->wiphy,
1954 carl9170_reg_notifier);
1955 if (err)
1956 return err;
1957
1958 if (modparam_noht) {
1959 carl9170_band_2GHz.ht_cap.ht_supported = false;
1960 carl9170_band_5GHz.ht_cap.ht_supported = false;
1961 }
1962
1963 for (i = 0; i < ar->fw.vif_num; i++) {
1964 ar->vif_priv[i].id = i;
1965 ar->vif_priv[i].vif = NULL;
1966 }
1967
1968 err = ieee80211_register_hw(ar->hw);
1969 if (err)
1970 return err;
1971
1972 /* mac80211 interface is now registered */
1973 ar->registered = true;
1974
1975 if (!ath_is_world_regd(regulatory))
1976 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1977
1978 #ifdef CONFIG_CARL9170_DEBUGFS
1979 carl9170_debugfs_register(ar);
1980 #endif /* CONFIG_CARL9170_DEBUGFS */
1981
1982 err = carl9170_led_init(ar);
1983 if (err)
1984 goto err_unreg;
1985
1986 #ifdef CONFIG_CARL9170_LEDS
1987 err = carl9170_led_register(ar);
1988 if (err)
1989 goto err_unreg;
1990 #endif /* CONFIG_CARL9170_LEDS */
1991
1992 #ifdef CONFIG_CARL9170_WPC
1993 err = carl9170_register_wps_button(ar);
1994 if (err)
1995 goto err_unreg;
1996 #endif /* CONFIG_CARL9170_WPC */
1997
1998 #ifdef CONFIG_CARL9170_HWRNG
1999 err = carl9170_register_hwrng(ar);
2000 if (err)
2001 goto err_unreg;
2002 #endif /* CONFIG_CARL9170_HWRNG */
2003
2004 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2005 wiphy_name(ar->hw->wiphy));
2006
2007 return 0;
2008
2009 err_unreg:
2010 carl9170_unregister(ar);
2011 return err;
2012 }
2013
2014 void carl9170_unregister(struct ar9170 *ar)
2015 {
2016 if (!ar->registered)
2017 return;
2018
2019 ar->registered = false;
2020
2021 #ifdef CONFIG_CARL9170_LEDS
2022 carl9170_led_unregister(ar);
2023 #endif /* CONFIG_CARL9170_LEDS */
2024
2025 #ifdef CONFIG_CARL9170_DEBUGFS
2026 carl9170_debugfs_unregister(ar);
2027 #endif /* CONFIG_CARL9170_DEBUGFS */
2028
2029 #ifdef CONFIG_CARL9170_WPC
2030 if (ar->wps.pbc) {
2031 input_unregister_device(ar->wps.pbc);
2032 ar->wps.pbc = NULL;
2033 }
2034 #endif /* CONFIG_CARL9170_WPC */
2035
2036 #ifdef CONFIG_CARL9170_HWRNG
2037 carl9170_unregister_hwrng(ar);
2038 #endif /* CONFIG_CARL9170_HWRNG */
2039
2040 carl9170_cancel_worker(ar);
2041 cancel_work_sync(&ar->restart_work);
2042
2043 ieee80211_unregister_hw(ar->hw);
2044 }
2045
2046 void carl9170_free(struct ar9170 *ar)
2047 {
2048 WARN_ON(ar->registered);
2049 WARN_ON(IS_INITIALIZED(ar));
2050
2051 kfree_skb(ar->rx_failover);
2052 ar->rx_failover = NULL;
2053
2054 kfree(ar->mem_bitmap);
2055 ar->mem_bitmap = NULL;
2056
2057 kfree(ar->survey);
2058 ar->survey = NULL;
2059
2060 mutex_destroy(&ar->mutex);
2061
2062 ieee80211_free_hw(ar->hw);
2063 }