drivers/net/wireless/ath/carl9170/main.c
1 /*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
50
51 static bool modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54
55 int modparam_noht;
56 module_param_named(noht, modparam_noht, int, S_IRUGO);
57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58
59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
60 .bitrate = (_bitrate), \
61 .flags = (_flags), \
62 .hw_value = (_hw_rate) | (_txpidx) << 4, \
63 }
64
65 struct ieee80211_rate __carl9170_ratetable[] = {
66 RATE(10, 0, 0, 0),
67 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
70 RATE(60, 0xb, 0, 0),
71 RATE(90, 0xf, 0, 0),
72 RATE(120, 0xa, 0, 0),
73 RATE(180, 0xe, 0, 0),
74 RATE(240, 0x9, 0, 0),
75 RATE(360, 0xd, 1, 0),
76 RATE(480, 0x8, 2, 0),
77 RATE(540, 0xc, 3, 0),
78 };
79 #undef RATE
80
81 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
82 #define carl9170_g_ratetable_size 12
83 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
84 #define carl9170_a_ratetable_size 8
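/*
 * Illustrative note (not from the original source): the RATE() macro above
 * packs the PHY rate code into the lower nibble of hw_value and the tx
 * power index into the upper nibble. For example, RATE(55, 2, 2, ...)
 * yields hw_value = 0x2 | (0x2 << 4) = 0x22, i.e. 5.5 Mbit/s CCK with
 * power index 2 (the bitrate field is given in units of 100 kbit/s).
 */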
85
86 /*
87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88 * array in phy.c so that we don't have to do frequency lookups!
89 */
90 #define CHAN(_freq, _idx) { \
91 .center_freq = (_freq), \
92 .hw_value = (_idx), \
93 .max_power = 18, /* XXX */ \
94 }
95
96 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97 CHAN(2412, 0),
98 CHAN(2417, 1),
99 CHAN(2422, 2),
100 CHAN(2427, 3),
101 CHAN(2432, 4),
102 CHAN(2437, 5),
103 CHAN(2442, 6),
104 CHAN(2447, 7),
105 CHAN(2452, 8),
106 CHAN(2457, 9),
107 CHAN(2462, 10),
108 CHAN(2467, 11),
109 CHAN(2472, 12),
110 CHAN(2484, 13),
111 };
112
113 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114 CHAN(4920, 14),
115 CHAN(4940, 15),
116 CHAN(4960, 16),
117 CHAN(4980, 17),
118 CHAN(5040, 18),
119 CHAN(5060, 19),
120 CHAN(5080, 20),
121 CHAN(5180, 21),
122 CHAN(5200, 22),
123 CHAN(5220, 23),
124 CHAN(5240, 24),
125 CHAN(5260, 25),
126 CHAN(5280, 26),
127 CHAN(5300, 27),
128 CHAN(5320, 28),
129 CHAN(5500, 29),
130 CHAN(5520, 30),
131 CHAN(5540, 31),
132 CHAN(5560, 32),
133 CHAN(5580, 33),
134 CHAN(5600, 34),
135 CHAN(5620, 35),
136 CHAN(5640, 36),
137 CHAN(5660, 37),
138 CHAN(5680, 38),
139 CHAN(5700, 39),
140 CHAN(5745, 40),
141 CHAN(5765, 41),
142 CHAN(5785, 42),
143 CHAN(5805, 43),
144 CHAN(5825, 44),
145 CHAN(5170, 45),
146 CHAN(5190, 46),
147 CHAN(5210, 47),
148 CHAN(5230, 48),
149 };
150 #undef CHAN
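/*
 * Example (illustrative sketch, not in the original file): thanks to the
 * hw_value indexing described in the NB above, the PHY code can fetch the
 * per-frequency calibration data without a search, e.g.:
 *
 *	const struct carl9170_phy_freq_params *freqpar =
 *		&carl9170_phy_freq_params[channel->hw_value];
 *
 * assuming carl9170_phy_freq_params[] in phy.c is laid out in the same
 * order as the channel tables above.
 */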
151
152 #define CARL9170_HT_CAP \
153 { \
154 .ht_supported = true, \
155 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
156 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
157 IEEE80211_HT_CAP_SGI_40 | \
158 IEEE80211_HT_CAP_DSSSCCK40 | \
159 IEEE80211_HT_CAP_SM_PS, \
160 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
161 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
162 .mcs = { \
163 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
164 .rx_highest = cpu_to_le16(300), \
165 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
166 }, \
167 }
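/*
 * Illustrative note (not from the original source): in the MCS set above,
 * rx_mask[0] and rx_mask[1] advertise MCS 0-7 and MCS 8-15 (two spatial
 * streams) and the 0x1 in rx_mask[4] advertises MCS 32. rx_highest is in
 * units of 1 Mbit/s, so cpu_to_le16(300) corresponds to the MCS 15 rate
 * with a 40 MHz channel and a short guard interval.
 */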
168
169 static struct ieee80211_supported_band carl9170_band_2GHz = {
170 .channels = carl9170_2ghz_chantable,
171 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
172 .bitrates = carl9170_g_ratetable,
173 .n_bitrates = carl9170_g_ratetable_size,
174 .ht_cap = CARL9170_HT_CAP,
175 };
176
177 static struct ieee80211_supported_band carl9170_band_5GHz = {
178 .channels = carl9170_5ghz_chantable,
179 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
180 .bitrates = carl9170_a_ratetable,
181 .n_bitrates = carl9170_a_ratetable_size,
182 .ht_cap = CARL9170_HT_CAP,
183 };
184
185 static void carl9170_ampdu_gc(struct ar9170 *ar)
186 {
187 struct carl9170_sta_tid *tid_info;
188 LIST_HEAD(tid_gc);
189
190 rcu_read_lock();
191 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192 spin_lock_bh(&ar->tx_ampdu_list_lock);
193 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194 tid_info->state = CARL9170_TID_STATE_KILLED;
195 list_del_rcu(&tid_info->list);
196 ar->tx_ampdu_list_len--;
197 list_add_tail(&tid_info->tmp_list, &tid_gc);
198 }
199 spin_unlock_bh(&ar->tx_ampdu_list_lock);
200
201 }
202 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203 rcu_read_unlock();
204
205 synchronize_rcu();
206
207 while (!list_empty(&tid_gc)) {
208 struct sk_buff *skb;
209 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210 tmp_list);
211
212 while ((skb = __skb_dequeue(&tid_info->queue)))
213 carl9170_tx_status(ar, skb, false);
214
215 list_del_init(&tid_info->tmp_list);
216 kfree(tid_info);
217 }
218 }
219
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 {
222 if (drop_queued) {
223 int i;
224
225 /*
226 * We can only drop frames which have not been uploaded
227 * to the device yet.
228 */
229
230 for (i = 0; i < ar->hw->queues; i++) {
231 struct sk_buff *skb;
232
233 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234 struct ieee80211_tx_info *info;
235
236 info = IEEE80211_SKB_CB(skb);
237 if (info->flags & IEEE80211_TX_CTL_AMPDU)
238 atomic_dec(&ar->tx_ampdu_upload);
239
240 carl9170_tx_status(ar, skb, false);
241 }
242 }
243 }
244
245 /* Wait for all other outstanding frames to time out. */
246 if (atomic_read(&ar->tx_total_queued))
247 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
248 }
249
250 static void carl9170_flush_ba(struct ar9170 *ar)
251 {
252 struct sk_buff_head free;
253 struct carl9170_sta_tid *tid_info;
254 struct sk_buff *skb;
255
256 __skb_queue_head_init(&free);
257
258 rcu_read_lock();
259 spin_lock_bh(&ar->tx_ampdu_list_lock);
260 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
261 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
262 tid_info->state = CARL9170_TID_STATE_SUSPEND;
263
264 spin_lock(&tid_info->lock);
265 while ((skb = __skb_dequeue(&tid_info->queue)))
266 __skb_queue_tail(&free, skb);
267 spin_unlock(&tid_info->lock);
268 }
269 }
270 spin_unlock_bh(&ar->tx_ampdu_list_lock);
271 rcu_read_unlock();
272
273 while ((skb = __skb_dequeue(&free)))
274 carl9170_tx_status(ar, skb, false);
275 }
276
277 static void carl9170_zap_queues(struct ar9170 *ar)
278 {
279 struct carl9170_vif_info *cvif;
280 unsigned int i;
281
282 carl9170_ampdu_gc(ar);
283
284 carl9170_flush_ba(ar);
285 carl9170_flush(ar, true);
286
287 for (i = 0; i < ar->hw->queues; i++) {
288 spin_lock_bh(&ar->tx_status[i].lock);
289 while (!skb_queue_empty(&ar->tx_status[i])) {
290 struct sk_buff *skb;
291
292 skb = skb_peek(&ar->tx_status[i]);
293 carl9170_tx_get_skb(skb);
294 spin_unlock_bh(&ar->tx_status[i].lock);
295 carl9170_tx_drop(ar, skb);
296 spin_lock_bh(&ar->tx_status[i].lock);
297 carl9170_tx_put_skb(skb);
298 }
299 spin_unlock_bh(&ar->tx_status[i].lock);
300 }
301
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
304 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
305
306 /* reinitialize queue statistics */
307 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
308 for (i = 0; i < ar->hw->queues; i++)
309 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
310
311 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
312 ar->mem_bitmap[i] = 0;
313
314 rcu_read_lock();
315 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
316 spin_lock_bh(&ar->beacon_lock);
317 dev_kfree_skb_any(cvif->beacon);
318 cvif->beacon = NULL;
319 spin_unlock_bh(&ar->beacon_lock);
320 }
321 rcu_read_unlock();
322
323 atomic_set(&ar->tx_ampdu_upload, 0);
324 atomic_set(&ar->tx_ampdu_scheduler, 0);
325 atomic_set(&ar->tx_total_pending, 0);
326 atomic_set(&ar->tx_total_queued, 0);
327 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
328 }
329
330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
331 do { \
332 queue.aifs = ai_fs; \
333 queue.cw_min = cwmin; \
334 queue.cw_max = cwmax; \
335 queue.txop = _txop; \
336 } while (0)
337
338 static int carl9170_op_start(struct ieee80211_hw *hw)
339 {
340 struct ar9170 *ar = hw->priv;
341 int err, i;
342
343 mutex_lock(&ar->mutex);
344
345 carl9170_zap_queues(ar);
346
347 /* reset QoS defaults */
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
352 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
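/*
 * Illustrative note (assumption, not from the original source): these reset
 * values mirror the standard 802.11 EDCA defaults. The txop figures
 * (47 for VO, 94 for VI) are presumably in units of 32 us, i.e. roughly
 * 1.5 ms and 3 ms.
 */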
353
354 ar->current_factor = ar->current_density = -1;
355 /* "The first key is unique." */
356 ar->usedkeys = 1;
357 ar->filter_state = 0;
358 ar->ps.last_action = jiffies;
359 ar->ps.last_slept = jiffies;
360 ar->erp_mode = CARL9170_ERP_AUTO;
361
362 /* Set "disable hw crypto offload" whenever the module parameter
363 * nohwcrypt is true or the firmware does not support it.
364 */
365 ar->disable_offload = modparam_nohwcrypt |
366 ar->fw.disable_offload_fw;
367 ar->rx_software_decryption = ar->disable_offload;
368
369 for (i = 0; i < ar->hw->queues; i++) {
370 ar->queue_stop_timeout[i] = jiffies;
371 ar->max_queue_stop_timeout[i] = 0;
372 }
373
374 atomic_set(&ar->mem_allocs, 0);
375
376 err = carl9170_usb_open(ar);
377 if (err)
378 goto out;
379
380 err = carl9170_init_mac(ar);
381 if (err)
382 goto out;
383
384 err = carl9170_set_qos(ar);
385 if (err)
386 goto out;
387
388 if (ar->fw.rx_filter) {
389 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
390 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
391 if (err)
392 goto out;
393 }
394
395 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
396 AR9170_DMA_TRIGGER_RXQ);
397 if (err)
398 goto out;
399
400 /* Clear key-cache */
401 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
402 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
403 0, NULL, 0);
404 if (err)
405 goto out;
406
407 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
408 1, NULL, 0);
409 if (err)
410 goto out;
411
412 if (i < AR9170_CAM_MAX_USER) {
413 err = carl9170_disable_key(ar, i);
414 if (err)
415 goto out;
416 }
417 }
418
419 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
420
421 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
422 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
423
424 ieee80211_wake_queues(ar->hw);
425 err = 0;
426
427 out:
428 mutex_unlock(&ar->mutex);
429 return err;
430 }
431
432 static void carl9170_cancel_worker(struct ar9170 *ar)
433 {
434 cancel_delayed_work_sync(&ar->stat_work);
435 cancel_delayed_work_sync(&ar->tx_janitor);
436 #ifdef CONFIG_CARL9170_LEDS
437 cancel_delayed_work_sync(&ar->led_work);
438 #endif /* CONFIG_CARL9170_LEDS */
439 cancel_work_sync(&ar->ps_work);
440 cancel_work_sync(&ar->ping_work);
441 cancel_work_sync(&ar->ampdu_work);
442 }
443
444 static void carl9170_op_stop(struct ieee80211_hw *hw)
445 {
446 struct ar9170 *ar = hw->priv;
447
448 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
449
450 ieee80211_stop_queues(ar->hw);
451
452 mutex_lock(&ar->mutex);
453 if (IS_ACCEPTING_CMD(ar)) {
454 RCU_INIT_POINTER(ar->beacon_iter, NULL);
455
456 carl9170_led_set_state(ar, 0);
457
458 /* stop DMA */
459 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
460 carl9170_usb_stop(ar);
461 }
462
463 carl9170_zap_queues(ar);
464 mutex_unlock(&ar->mutex);
465
466 carl9170_cancel_worker(ar);
467 }
468
469 static void carl9170_restart_work(struct work_struct *work)
470 {
471 struct ar9170 *ar = container_of(work, struct ar9170,
472 restart_work);
473 int err = -EIO;
474
475 ar->usedkeys = 0;
476 ar->filter_state = 0;
477 carl9170_cancel_worker(ar);
478
479 mutex_lock(&ar->mutex);
480 if (!ar->force_usb_reset) {
481 err = carl9170_usb_restart(ar);
482 if (net_ratelimit()) {
483 if (err)
484 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
485 else
486 dev_info(&ar->udev->dev, "device restarted successfully.\n");
487 }
488 }
489 carl9170_zap_queues(ar);
490 mutex_unlock(&ar->mutex);
491
492 if (!err && !ar->force_usb_reset) {
493 ar->restart_counter++;
494 atomic_set(&ar->pending_restarts, 0);
495
496 ieee80211_restart_hw(ar->hw);
497 } else {
498 /*
499 * The reset was unsuccessful and the device seems to
500 * be dead. But there's still one option: a low-level
501 * usb subsystem reset...
502 */
503
504 carl9170_usb_reset(ar);
505 }
506 }
507
508 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
509 {
510 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
511
512 /*
513 * Sometimes, an error can trigger several different reset events.
514 * By ignoring these *surplus* reset events, the device won't be
515 * killed again right after it has recovered.
516 */
517 if (atomic_inc_return(&ar->pending_restarts) > 1) {
518 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
519 return;
520 }
521
522 ieee80211_stop_queues(ar->hw);
523
524 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
525
526 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
527 !WARN_ON(r >= __CARL9170_RR_LAST))
528 ar->last_reason = r;
529
530 if (!ar->registered)
531 return;
532
533 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
534 ar->force_usb_reset = true;
535
536 ieee80211_queue_work(ar->hw, &ar->restart_work);
537
538 /*
539 * At this point, the device instance might have vanished or been disabled.
540 * So don't put any code that accesses the ar9170 struct
541 * without proper protection.
542 */
543 }
544
545 static void carl9170_ping_work(struct work_struct *work)
546 {
547 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
548 int err;
549
550 if (!IS_STARTED(ar))
551 return;
552
553 mutex_lock(&ar->mutex);
554 err = carl9170_echo_test(ar, 0xdeadbeef);
555 if (err)
556 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
557 mutex_unlock(&ar->mutex);
558 }
559
560 static int carl9170_init_interface(struct ar9170 *ar,
561 struct ieee80211_vif *vif)
562 {
563 struct ath_common *common = &ar->common;
564 int err;
565
566 if (!vif) {
567 WARN_ON_ONCE(IS_STARTED(ar));
568 return 0;
569 }
570
571 memcpy(common->macaddr, vif->addr, ETH_ALEN);
572
573 /* We have to fall back to software crypto whenever
574 * the user chooses to participate in an IBSS. HW
575 * offload for IBSS RSN is not supported by this driver.
576 *
577 * NOTE: If the previous main interface has already
578 * disabled hw crypto offload, we have to keep this
579 * previous disable_offload setting as it was.
580 * Although ideally, we should notify mac80211 and tell
581 * it to forget about any HW crypto offload for now.
582 */
583 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
584 (vif->type != NL80211_IFTYPE_AP));
585
586 /* While the driver supports HW offload in a single
587 * P2P client configuration, it doesn't support HW
588 * offload in the favoured concurrent P2P GO+CLIENT
589 * configuration. Hence, HW offload will always be
590 * disabled for P2P.
591 */
592 ar->disable_offload |= vif->p2p;
593
594 ar->rx_software_decryption = ar->disable_offload;
595
596 err = carl9170_set_operating_mode(ar);
597 return err;
598 }
599
600 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
601 struct ieee80211_vif *vif)
602 {
603 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
604 struct ieee80211_vif *main_vif, *old_main = NULL;
605 struct ar9170 *ar = hw->priv;
606 int vif_id = -1, err = 0;
607
608 mutex_lock(&ar->mutex);
609 rcu_read_lock();
610 if (vif_priv->active) {
611 /*
612 * Skip the interface structure initialization,
613 * if the vif survived the _restart call.
614 */
615 vif_id = vif_priv->id;
616 vif_priv->enable_beacon = false;
617
618 spin_lock_bh(&ar->beacon_lock);
619 dev_kfree_skb_any(vif_priv->beacon);
620 vif_priv->beacon = NULL;
621 spin_unlock_bh(&ar->beacon_lock);
622
623 goto init;
624 }
625
626 /* Because the AR9170 HW's MAC doesn't provide full support for
627 * multiple, independent interfaces [of different operation modes],
628 * we have to select ONE main interface [main mode of HW], but we
629 * can have multiple slaves [AKA: entries in the ACK-table].
630 *
631 * The first (from HEAD/TOP) interface in the ar->vif_list is
632 * always the main intf. All following intfs in this list
633 * are considered to be slave intfs.
634 */
635 main_vif = carl9170_get_main_vif(ar);
636
637 if (main_vif) {
638 switch (main_vif->type) {
639 case NL80211_IFTYPE_STATION:
640 if (vif->type == NL80211_IFTYPE_STATION)
641 break;
642
643 /* P2P GO [master] use-case
644 * Because the P2P GO station is selected dynamically
645 * by all participating peers of a WIFI Direct network,
646 * the driver has to be able to change the main interface
647 * operating mode on the fly.
648 */
649 if (main_vif->p2p && vif->p2p &&
650 vif->type == NL80211_IFTYPE_AP) {
651 old_main = main_vif;
652 break;
653 }
654
655 err = -EBUSY;
656 rcu_read_unlock();
657
658 goto unlock;
659
660 case NL80211_IFTYPE_MESH_POINT:
661 case NL80211_IFTYPE_AP:
662 if ((vif->type == NL80211_IFTYPE_STATION) ||
663 (vif->type == NL80211_IFTYPE_WDS) ||
664 (vif->type == NL80211_IFTYPE_AP) ||
665 (vif->type == NL80211_IFTYPE_MESH_POINT))
666 break;
667
668 err = -EBUSY;
669 rcu_read_unlock();
670 goto unlock;
671
672 default:
673 rcu_read_unlock();
674 goto unlock;
675 }
676 }
677
678 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
679
680 if (vif_id < 0) {
681 rcu_read_unlock();
682
683 err = -ENOSPC;
684 goto unlock;
685 }
686
687 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
688
689 vif_priv->active = true;
690 vif_priv->id = vif_id;
691 vif_priv->enable_beacon = false;
692 ar->vifs++;
693 if (old_main) {
694 /* We end up here if the main interface is being replaced.
695 * Put the new main interface at the HEAD of the list and the
696 * previous interface will automatically become second in line.
697 */
698 list_add_rcu(&vif_priv->list, &ar->vif_list);
699 } else {
700 /* Add a new interface. If the list is empty, it will become the
701 * main interface, otherwise it will be a slave.
702 */
703 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
704 }
705 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
706
707 init:
708 main_vif = carl9170_get_main_vif(ar);
709
710 if (main_vif == vif) {
711 rcu_assign_pointer(ar->beacon_iter, vif_priv);
712 rcu_read_unlock();
713
714 if (old_main) {
715 struct carl9170_vif_info *old_main_priv =
716 (void *) old_main->drv_priv;
717 /* downgrade old main intf to slave intf.
718 * NOTE: We are no longer under rcu_read_lock.
719 * But we are still holding ar->mutex, so the
720 * vif data [id, addr] is safe.
721 */
722 err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
723 old_main->addr);
724 if (err)
725 goto unlock;
726 }
727
728 err = carl9170_init_interface(ar, vif);
729 if (err)
730 goto unlock;
731 } else {
732 rcu_read_unlock();
733 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
734
735 if (err)
736 goto unlock;
737 }
738
739 if (ar->fw.tx_seq_table) {
740 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
741 0);
742 if (err)
743 goto unlock;
744 }
745
746 unlock:
747 if (err && (vif_id >= 0)) {
748 vif_priv->active = false;
749 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
750 ar->vifs--;
751 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
752 list_del_rcu(&vif_priv->list);
753 mutex_unlock(&ar->mutex);
754 synchronize_rcu();
755 } else {
756 if (ar->vifs > 1)
757 ar->ps.off_override |= PS_OFF_VIF;
758
759 mutex_unlock(&ar->mutex);
760 }
761
762 return err;
763 }
764
765 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
766 struct ieee80211_vif *vif)
767 {
768 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
769 struct ieee80211_vif *main_vif;
770 struct ar9170 *ar = hw->priv;
771 unsigned int id;
772
773 mutex_lock(&ar->mutex);
774
775 if (WARN_ON_ONCE(!vif_priv->active))
776 goto unlock;
777
778 ar->vifs--;
779
780 rcu_read_lock();
781 main_vif = carl9170_get_main_vif(ar);
782
783 id = vif_priv->id;
784
785 vif_priv->active = false;
786 WARN_ON(vif_priv->enable_beacon);
787 vif_priv->enable_beacon = false;
788 list_del_rcu(&vif_priv->list);
789 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
790
791 if (vif == main_vif) {
792 rcu_read_unlock();
793
794 if (ar->vifs) {
795 WARN_ON(carl9170_init_interface(ar,
796 carl9170_get_main_vif(ar)));
797 } else {
798 carl9170_set_operating_mode(ar);
799 }
800 } else {
801 rcu_read_unlock();
802
803 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
804 }
805
806 carl9170_update_beacon(ar, false);
807 carl9170_flush_cab(ar, id);
808
809 spin_lock_bh(&ar->beacon_lock);
810 dev_kfree_skb_any(vif_priv->beacon);
811 vif_priv->beacon = NULL;
812 spin_unlock_bh(&ar->beacon_lock);
813
814 bitmap_release_region(&ar->vif_bitmap, id, 0);
815
816 carl9170_set_beacon_timers(ar);
817
818 if (ar->vifs == 1)
819 ar->ps.off_override &= ~PS_OFF_VIF;
820
821 unlock:
822 mutex_unlock(&ar->mutex);
823
824 synchronize_rcu();
825 }
826
827 void carl9170_ps_check(struct ar9170 *ar)
828 {
829 ieee80211_queue_work(ar->hw, &ar->ps_work);
830 }
831
832 /* caller must hold ar->mutex */
833 static int carl9170_ps_update(struct ar9170 *ar)
834 {
835 bool ps = false;
836 int err = 0;
837
838 if (!ar->ps.off_override)
839 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
840
841 if (ps != ar->ps.state) {
842 err = carl9170_powersave(ar, ps);
843 if (err)
844 return err;
845
846 if (ar->ps.state && !ps) {
847 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
848 ar->ps.last_action);
849 }
850
851 if (ps)
852 ar->ps.last_slept = jiffies;
853
854 ar->ps.last_action = jiffies;
855 ar->ps.state = ps;
856 }
857
858 return 0;
859 }
860
861 static void carl9170_ps_work(struct work_struct *work)
862 {
863 struct ar9170 *ar = container_of(work, struct ar9170,
864 ps_work);
865 mutex_lock(&ar->mutex);
866 if (IS_STARTED(ar))
867 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
868 mutex_unlock(&ar->mutex);
869 }
870
871 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
872 {
873 int err;
874
875 if (noise) {
876 err = carl9170_get_noisefloor(ar);
877 if (err)
878 return err;
879 }
880
881 if (ar->fw.hw_counters) {
882 err = carl9170_collect_tally(ar);
883 if (err)
884 return err;
885 }
886
887 if (flush)
888 memset(&ar->tally, 0, sizeof(ar->tally));
889
890 return 0;
891 }
892
893 static void carl9170_stat_work(struct work_struct *work)
894 {
895 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
896 int err;
897
898 mutex_lock(&ar->mutex);
899 err = carl9170_update_survey(ar, false, true);
900 mutex_unlock(&ar->mutex);
901
902 if (err)
903 return;
904
905 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
906 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
907 }
908
909 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
910 {
911 struct ar9170 *ar = hw->priv;
912 int err = 0;
913
914 mutex_lock(&ar->mutex);
915 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
916 /* TODO */
917 err = 0;
918 }
919
920 if (changed & IEEE80211_CONF_CHANGE_PS) {
921 err = carl9170_ps_update(ar);
922 if (err)
923 goto out;
924 }
925
926 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
927 /* TODO */
928 err = 0;
929 }
930
931 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
932 enum nl80211_channel_type channel_type =
933 cfg80211_get_chandef_type(&hw->conf.chandef);
934
935 /* adjust slot time for 5 GHz */
936 err = carl9170_set_slot_time(ar);
937 if (err)
938 goto out;
939
940 err = carl9170_update_survey(ar, true, false);
941 if (err)
942 goto out;
943
944 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
945 channel_type);
946 if (err)
947 goto out;
948
949 err = carl9170_update_survey(ar, false, true);
950 if (err)
951 goto out;
952
953 err = carl9170_set_dyn_sifs_ack(ar);
954 if (err)
955 goto out;
956
957 err = carl9170_set_rts_cts_rate(ar);
958 if (err)
959 goto out;
960 }
961
962 if (changed & IEEE80211_CONF_CHANGE_POWER) {
963 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
964 if (err)
965 goto out;
966 }
967
968 out:
969 mutex_unlock(&ar->mutex);
970 return err;
971 }
972
973 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
974 struct netdev_hw_addr_list *mc_list)
975 {
976 struct netdev_hw_addr *ha;
977 u64 mchash;
978
979 /* always get broadcast frames */
980 mchash = 1ULL << (0xff >> 2);
981
982 netdev_hw_addr_list_for_each(ha, mc_list)
983 mchash |= 1ULL << (ha->addr[5] >> 2);
984
985 return mchash;
986 }
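/*
 * Example (illustrative, not from the original source): the 64-bit hash
 * above is keyed on the top six bits of the last address octet. A
 * multicast address ending in 0x33 sets bit (0x33 >> 2) = 12, and the
 * broadcast address ff:ff:ff:ff:ff:ff always maps to bit (0xff >> 2) = 63,
 * which is why that bit is pre-set.
 */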
987
988 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
989 unsigned int changed_flags,
990 unsigned int *new_flags,
991 u64 multicast)
992 {
993 struct ar9170 *ar = hw->priv;
994
995 /* mask supported flags */
996 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
997
998 if (!IS_ACCEPTING_CMD(ar))
999 return;
1000
1001 mutex_lock(&ar->mutex);
1002
1003 ar->filter_state = *new_flags;
1004 /*
1005 * We can support more by setting the sniffer bit and
1006 * then checking the error flags later.
1007 */
1008
1009 if (*new_flags & FIF_ALLMULTI)
1010 multicast = ~0ULL;
1011
1012 if (multicast != ar->cur_mc_hash)
1013 WARN_ON(carl9170_update_multicast(ar, multicast));
1014
1015 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
1016 ar->sniffer_enabled = !!(*new_flags &
1017 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
1018
1019 WARN_ON(carl9170_set_operating_mode(ar));
1020 }
1021
1022 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1023 u32 rx_filter = 0;
1024
1025 if (!ar->fw.ba_filter)
1026 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1027
1028 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1029 rx_filter |= CARL9170_RX_FILTER_BAD;
1030
1031 if (!(*new_flags & FIF_CONTROL))
1032 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1033
1034 if (!(*new_flags & FIF_PSPOLL))
1035 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1036
1037 if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
1038 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1039 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1040 }
1041
1042 WARN_ON(carl9170_rx_filter(ar, rx_filter));
1043 }
1044
1045 mutex_unlock(&ar->mutex);
1046 }
1047
1048
1049 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1050 struct ieee80211_vif *vif,
1051 struct ieee80211_bss_conf *bss_conf,
1052 u32 changed)
1053 {
1054 struct ar9170 *ar = hw->priv;
1055 struct ath_common *common = &ar->common;
1056 int err = 0;
1057 struct carl9170_vif_info *vif_priv;
1058 struct ieee80211_vif *main_vif;
1059
1060 mutex_lock(&ar->mutex);
1061 vif_priv = (void *) vif->drv_priv;
1062 main_vif = carl9170_get_main_vif(ar);
1063 if (WARN_ON(!main_vif))
1064 goto out;
1065
1066 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1067 struct carl9170_vif_info *iter;
1068 int i = 0;
1069
1070 vif_priv->enable_beacon = bss_conf->enable_beacon;
1071 rcu_read_lock();
1072 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1073 if (iter->active && iter->enable_beacon)
1074 i++;
1075
1076 }
1077 rcu_read_unlock();
1078
1079 ar->beacon_enabled = i;
1080 }
1081
1082 if (changed & BSS_CHANGED_BEACON) {
1083 err = carl9170_update_beacon(ar, false);
1084 if (err)
1085 goto out;
1086 }
1087
1088 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1089 BSS_CHANGED_BEACON_INT)) {
1090
1091 if (main_vif != vif) {
1092 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1093 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1094 }
1095
1096 /*
1097 * Therefore a hard limit for the broadcast traffic should
1098 * prevent false alarms.
1099 */
1100 if (vif->type != NL80211_IFTYPE_STATION &&
1101 (bss_conf->beacon_int * bss_conf->dtim_period >=
1102 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1103 err = -EINVAL;
1104 goto out;
1105 }
1106
1107 err = carl9170_set_beacon_timers(ar);
1108 if (err)
1109 goto out;
1110 }
1111
1112 if (changed & BSS_CHANGED_HT) {
1113 /* TODO */
1114 err = 0;
1115 if (err)
1116 goto out;
1117 }
1118
1119 if (main_vif != vif)
1120 goto out;
1121
1122 /*
1123 * The following settings can only be changed by the
1124 * master interface.
1125 */
1126
1127 if (changed & BSS_CHANGED_BSSID) {
1128 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1129 err = carl9170_set_operating_mode(ar);
1130 if (err)
1131 goto out;
1132 }
1133
1134 if (changed & BSS_CHANGED_ASSOC) {
1135 ar->common.curaid = bss_conf->aid;
1136 err = carl9170_set_beacon_timers(ar);
1137 if (err)
1138 goto out;
1139 }
1140
1141 if (changed & BSS_CHANGED_ERP_SLOT) {
1142 err = carl9170_set_slot_time(ar);
1143 if (err)
1144 goto out;
1145 }
1146
1147 if (changed & BSS_CHANGED_BASIC_RATES) {
1148 err = carl9170_set_mac_rates(ar);
1149 if (err)
1150 goto out;
1151 }
1152
1153 out:
1154 WARN_ON_ONCE(err && IS_STARTED(ar));
1155 mutex_unlock(&ar->mutex);
1156 }
1157
1158 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1159 struct ieee80211_vif *vif)
1160 {
1161 struct ar9170 *ar = hw->priv;
1162 struct carl9170_tsf_rsp tsf;
1163 int err;
1164
1165 mutex_lock(&ar->mutex);
1166 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1167 0, NULL, sizeof(tsf), &tsf);
1168 mutex_unlock(&ar->mutex);
1169 if (WARN_ON(err))
1170 return 0;
1171
1172 return le64_to_cpu(tsf.tsf_64);
1173 }
1174
1175 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1176 struct ieee80211_vif *vif,
1177 struct ieee80211_sta *sta,
1178 struct ieee80211_key_conf *key)
1179 {
1180 struct ar9170 *ar = hw->priv;
1181 int err = 0, i;
1182 u8 ktype;
1183
1184 if (ar->disable_offload || !vif)
1185 return -EOPNOTSUPP;
1186
1187 /* Fall back to software encryption whenever the driver is connected
1188 * to more than one network.
1189 *
1190 * This is very unfortunate, because some machines cannot handle
1191 * the high throughput speed in 802.11n networks.
1192 */
1193
1194 if (!is_main_vif(ar, vif)) {
1195 mutex_lock(&ar->mutex);
1196 goto err_softw;
1197 }
1198
1199 /*
1200 * While the hardware supports a *catch-all* key for offloading
1201 * group-key en-/de-cryption, the way the hardware decides which
1202 * keyId maps to which key remains a mystery...
1203 */
1204 if ((vif->type != NL80211_IFTYPE_STATION &&
1205 vif->type != NL80211_IFTYPE_ADHOC) &&
1206 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1207 return -EOPNOTSUPP;
1208
1209 switch (key->cipher) {
1210 case WLAN_CIPHER_SUITE_WEP40:
1211 ktype = AR9170_ENC_ALG_WEP64;
1212 break;
1213 case WLAN_CIPHER_SUITE_WEP104:
1214 ktype = AR9170_ENC_ALG_WEP128;
1215 break;
1216 case WLAN_CIPHER_SUITE_TKIP:
1217 ktype = AR9170_ENC_ALG_TKIP;
1218 break;
1219 case WLAN_CIPHER_SUITE_CCMP:
1220 ktype = AR9170_ENC_ALG_AESCCMP;
1221 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1222 break;
1223 default:
1224 return -EOPNOTSUPP;
1225 }
1226
1227 mutex_lock(&ar->mutex);
1228 if (cmd == SET_KEY) {
1229 if (!IS_STARTED(ar)) {
1230 err = -EOPNOTSUPP;
1231 goto out;
1232 }
1233
1234 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1235 sta = NULL;
1236
1237 i = 64 + key->keyidx;
1238 } else {
1239 for (i = 0; i < 64; i++)
1240 if (!(ar->usedkeys & BIT(i)))
1241 break;
1242 if (i == 64)
1243 goto err_softw;
1244 }
1245
1246 key->hw_key_idx = i;
1247
1248 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1249 ktype, 0, key->key,
1250 min_t(u8, 16, key->keylen));
1251 if (err)
1252 goto out;
1253
1254 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1255 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1256 NULL, ktype, 1,
1257 key->key + 16, 16);
1258 if (err)
1259 goto out;
1260
1261 /*
1262 * The hardware is not capable of generating the MMIC
1263 * for fragmented frames!
1264 */
1265 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1266 }
1267
1268 if (i < 64)
1269 ar->usedkeys |= BIT(i);
1270
1271 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1272 } else {
1273 if (!IS_STARTED(ar)) {
1274 /* The device is gone... together with the key ;-) */
1275 err = 0;
1276 goto out;
1277 }
1278
1279 if (key->hw_key_idx < 64) {
1280 ar->usedkeys &= ~BIT(key->hw_key_idx);
1281 } else {
1282 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1283 AR9170_ENC_ALG_NONE, 0,
1284 NULL, 0);
1285 if (err)
1286 goto out;
1287
1288 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1289 err = carl9170_upload_key(ar, key->hw_key_idx,
1290 NULL,
1291 AR9170_ENC_ALG_NONE,
1292 1, NULL, 0);
1293 if (err)
1294 goto out;
1295 }
1296
1297 }
1298
1299 err = carl9170_disable_key(ar, key->hw_key_idx);
1300 if (err)
1301 goto out;
1302 }
1303
1304 out:
1305 mutex_unlock(&ar->mutex);
1306 return err;
1307
1308 err_softw:
1309 if (!ar->rx_software_decryption) {
1310 ar->rx_software_decryption = true;
1311 carl9170_set_operating_mode(ar);
1312 }
1313 mutex_unlock(&ar->mutex);
1314 return -ENOSPC;
1315 }
1316
1317 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1318 struct ieee80211_vif *vif,
1319 struct ieee80211_sta *sta)
1320 {
1321 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1322 unsigned int i;
1323
1324 atomic_set(&sta_info->pending_frames, 0);
1325
1326 if (sta->ht_cap.ht_supported) {
1327 if (sta->ht_cap.ampdu_density > 6) {
1328 /*
1329 * The HW does not support a 16us AMPDU density.
1330 * No HT-Xmit for this station.
1331 */
1332
1333 return 0;
1334 }
1335
1336 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1337 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1338
1339 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1340 sta_info->ht_sta = true;
1341 }
1342
1343 return 0;
1344 }
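/*
 * Illustrative note (interpretation, not from the original source): with
 * the maximum ampdu_factor of 3 (IEEE80211_HT_MAX_AMPDU_64K, as advertised
 * in CARL9170_HT_CAP above), ampdu_max_len evaluates to 1 << 6 = 64,
 * which presumably expresses the 64 KiB A-MPDU length limit in KiB.
 */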
1345
1346 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1347 struct ieee80211_vif *vif,
1348 struct ieee80211_sta *sta)
1349 {
1350 struct ar9170 *ar = hw->priv;
1351 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1352 unsigned int i;
1353 bool cleanup = false;
1354
1355 if (sta->ht_cap.ht_supported) {
1356
1357 sta_info->ht_sta = false;
1358
1359 rcu_read_lock();
1360 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1361 struct carl9170_sta_tid *tid_info;
1362
1363 tid_info = rcu_dereference(sta_info->agg[i]);
1364 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1365
1366 if (!tid_info)
1367 continue;
1368
1369 spin_lock_bh(&ar->tx_ampdu_list_lock);
1370 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1371 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1372 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1373 cleanup = true;
1374 }
1375 rcu_read_unlock();
1376
1377 if (cleanup)
1378 carl9170_ampdu_gc(ar);
1379 }
1380
1381 return 0;
1382 }
1383
1384 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1385 struct ieee80211_vif *vif, u16 queue,
1386 const struct ieee80211_tx_queue_params *param)
1387 {
1388 struct ar9170 *ar = hw->priv;
1389 int ret;
1390
1391 mutex_lock(&ar->mutex);
1392 if (queue < ar->hw->queues) {
1393 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1394 ret = carl9170_set_qos(ar);
1395 } else {
1396 ret = -EINVAL;
1397 }
1398
1399 mutex_unlock(&ar->mutex);
1400 return ret;
1401 }
1402
1403 static void carl9170_ampdu_work(struct work_struct *work)
1404 {
1405 struct ar9170 *ar = container_of(work, struct ar9170,
1406 ampdu_work);
1407
1408 if (!IS_STARTED(ar))
1409 return;
1410
1411 mutex_lock(&ar->mutex);
1412 carl9170_ampdu_gc(ar);
1413 mutex_unlock(&ar->mutex);
1414 }
1415
1416 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1417 struct ieee80211_vif *vif,
1418 enum ieee80211_ampdu_mlme_action action,
1419 struct ieee80211_sta *sta,
1420 u16 tid, u16 *ssn, u8 buf_size)
1421 {
1422 struct ar9170 *ar = hw->priv;
1423 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1424 struct carl9170_sta_tid *tid_info;
1425
1426 if (modparam_noht)
1427 return -EOPNOTSUPP;
1428
1429 switch (action) {
1430 case IEEE80211_AMPDU_TX_START:
1431 if (!sta_info->ht_sta)
1432 return -EOPNOTSUPP;
1433
1434 rcu_read_lock();
1435 if (rcu_dereference(sta_info->agg[tid])) {
1436 rcu_read_unlock();
1437 return -EBUSY;
1438 }
1439
1440 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1441 GFP_ATOMIC);
1442 if (!tid_info) {
1443 rcu_read_unlock();
1444 return -ENOMEM;
1445 }
1446
1447 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1448 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1449 tid_info->tid = tid;
1450 tid_info->max = sta_info->ampdu_max_len;
1451 tid_info->sta = sta;
1452 tid_info->vif = vif;
1453
1454 INIT_LIST_HEAD(&tid_info->list);
1455 INIT_LIST_HEAD(&tid_info->tmp_list);
1456 skb_queue_head_init(&tid_info->queue);
1457 spin_lock_init(&tid_info->lock);
1458
1459 spin_lock_bh(&ar->tx_ampdu_list_lock);
1460 ar->tx_ampdu_list_len++;
1461 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1462 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1463 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1464 rcu_read_unlock();
1465
1466 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1467 break;
1468
1469 case IEEE80211_AMPDU_TX_STOP_CONT:
1470 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1471 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1472 rcu_read_lock();
1473 tid_info = rcu_dereference(sta_info->agg[tid]);
1474 if (tid_info) {
1475 spin_lock_bh(&ar->tx_ampdu_list_lock);
1476 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1477 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1478 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1479 }
1480
1481 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1482 rcu_read_unlock();
1483
1484 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1485 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1486 break;
1487
1488 case IEEE80211_AMPDU_TX_OPERATIONAL:
1489 rcu_read_lock();
1490 tid_info = rcu_dereference(sta_info->agg[tid]);
1491
1492 sta_info->stats[tid].clear = true;
1493 sta_info->stats[tid].req = false;
1494
1495 if (tid_info) {
1496 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1497 tid_info->state = CARL9170_TID_STATE_IDLE;
1498 }
1499 rcu_read_unlock();
1500
1501 if (WARN_ON_ONCE(!tid_info))
1502 return -EFAULT;
1503
1504 break;
1505
1506 case IEEE80211_AMPDU_RX_START:
1507 case IEEE80211_AMPDU_RX_STOP:
1508 /* Handled by hardware */
1509 break;
1510
1511 default:
1512 return -EOPNOTSUPP;
1513 }
1514
1515 return 0;
1516 }
1517
1518 #ifdef CONFIG_CARL9170_WPC
1519 static int carl9170_register_wps_button(struct ar9170 *ar)
1520 {
1521 struct input_dev *input;
1522 int err;
1523
1524 if (!(ar->features & CARL9170_WPS_BUTTON))
1525 return 0;
1526
1527 input = input_allocate_device();
1528 if (!input)
1529 return -ENOMEM;
1530
1531 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1532 wiphy_name(ar->hw->wiphy));
1533
1534 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1535 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1536
1537 input->name = ar->wps.name;
1538 input->phys = ar->wps.phys;
1539 input->id.bustype = BUS_USB;
1540 input->dev.parent = &ar->hw->wiphy->dev;
1541
1542 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1543
1544 err = input_register_device(input);
1545 if (err) {
1546 input_free_device(input);
1547 return err;
1548 }
1549
1550 ar->wps.pbc = input;
1551 return 0;
1552 }
1553 #endif /* CONFIG_CARL9170_WPC */
1554
1555 #ifdef CONFIG_CARL9170_HWRNG
1556 static int carl9170_rng_get(struct ar9170 *ar)
1557 {
1558
1559 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1560 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1561
1562 static const __le32 rng_load[RW] = {
1563 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1564
1565 u32 buf[RW];
1566
1567 unsigned int i, off = 0, transfer, count;
1568 int err;
1569
1570 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1571
1572 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1573 return -EAGAIN;
1574
1575 count = ARRAY_SIZE(ar->rng.cache);
1576 while (count) {
1577 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1578 RB, (u8 *) rng_load,
1579 RB, (u8 *) buf);
1580 if (err)
1581 return err;
1582
1583 transfer = min_t(unsigned int, count, RW);
1584 for (i = 0; i < transfer; i++)
1585 ar->rng.cache[off + i] = buf[i];
1586
1587 off += transfer;
1588 count -= transfer;
1589 }
1590
1591 ar->rng.cache_idx = 0;
1592
1593 #undef RW
1594 #undef RB
1595 return 0;
1596 }
1597
1598 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1599 {
1600 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1601 int ret = -EIO;
1602
1603 mutex_lock(&ar->mutex);
1604 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1605 ret = carl9170_rng_get(ar);
1606 if (ret) {
1607 mutex_unlock(&ar->mutex);
1608 return ret;
1609 }
1610 }
1611
1612 *data = ar->rng.cache[ar->rng.cache_idx++];
1613 mutex_unlock(&ar->mutex);
1614
1615 return sizeof(u16);
1616 }
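/*
 * Illustrative note (assumption, not from the original source): each call
 * hands one cache entry back to the hwrng core but reports only
 * sizeof(u16) bytes, presumably because the AR9170_RAND_REG_NUM register
 * only yields 16 bits of entropy per sample.
 */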
1617
1618 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1619 {
1620 if (ar->rng.initialized) {
1621 hwrng_unregister(&ar->rng.rng);
1622 ar->rng.initialized = false;
1623 }
1624 }
1625
1626 static int carl9170_register_hwrng(struct ar9170 *ar)
1627 {
1628 int err;
1629
1630 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1631 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1632 ar->rng.rng.name = ar->rng.name;
1633 ar->rng.rng.data_read = carl9170_rng_read;
1634 ar->rng.rng.priv = (unsigned long)ar;
1635
1636 if (WARN_ON(ar->rng.initialized))
1637 return -EALREADY;
1638
1639 err = hwrng_register(&ar->rng.rng);
1640 if (err) {
1641 dev_err(&ar->udev->dev, "Failed to register the random "
1642 "number generator (%d)\n", err);
1643 return err;
1644 }
1645
1646 ar->rng.initialized = true;
1647
1648 err = carl9170_rng_get(ar);
1649 if (err) {
1650 carl9170_unregister_hwrng(ar);
1651 return err;
1652 }
1653
1654 return 0;
1655 }
1656 #endif /* CONFIG_CARL9170_HWRNG */
1657
1658 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1659 struct survey_info *survey)
1660 {
1661 struct ar9170 *ar = hw->priv;
1662 struct ieee80211_channel *chan;
1663 struct ieee80211_supported_band *band;
1664 int err, b, i;
1665
1666 chan = ar->channel;
1667 if (!chan)
1668 return -ENODEV;
1669
1670 if (idx == chan->hw_value) {
1671 mutex_lock(&ar->mutex);
1672 err = carl9170_update_survey(ar, false, true);
1673 mutex_unlock(&ar->mutex);
1674 if (err)
1675 return err;
1676 }
1677
1678 for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1679 band = ar->hw->wiphy->bands[b];
1680
1681 if (!band)
1682 continue;
1683
1684 for (i = 0; i < band->n_channels; i++) {
1685 if (band->channels[i].hw_value == idx) {
1686 chan = &band->channels[i];
1687 goto found;
1688 }
1689 }
1690 }
1691 return -ENOENT;
1692
1693 found:
1694 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1695
1696 survey->channel = chan;
1697 survey->filled = SURVEY_INFO_NOISE_DBM;
1698
1699 if (ar->channel == chan)
1700 survey->filled |= SURVEY_INFO_IN_USE;
1701
1702 if (ar->fw.hw_counters) {
1703 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1704 SURVEY_INFO_CHANNEL_TIME_BUSY |
1705 SURVEY_INFO_CHANNEL_TIME_TX;
1706 }
1707
1708 return 0;
1709 }
1710
1711 static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1712 {
1713 struct ar9170 *ar = hw->priv;
1714 unsigned int vid;
1715
1716 mutex_lock(&ar->mutex);
1717 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1718 carl9170_flush_cab(ar, vid);
1719
1720 carl9170_flush(ar, drop);
1721 mutex_unlock(&ar->mutex);
1722 }
1723
1724 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1725 struct ieee80211_low_level_stats *stats)
1726 {
1727 struct ar9170 *ar = hw->priv;
1728
1729 memset(stats, 0, sizeof(*stats));
1730 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1731 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1732 return 0;
1733 }
1734
1735 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1736 struct ieee80211_vif *vif,
1737 enum sta_notify_cmd cmd,
1738 struct ieee80211_sta *sta)
1739 {
1740 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1741
1742 switch (cmd) {
1743 case STA_NOTIFY_SLEEP:
1744 sta_info->sleeping = true;
1745 if (atomic_read(&sta_info->pending_frames))
1746 ieee80211_sta_block_awake(hw, sta, true);
1747 break;
1748
1749 case STA_NOTIFY_AWAKE:
1750 sta_info->sleeping = false;
1751 break;
1752 }
1753 }
1754
1755 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1756 {
1757 struct ar9170 *ar = hw->priv;
1758
1759 return !!atomic_read(&ar->tx_total_queued);
1760 }
1761
1762 static const struct ieee80211_ops carl9170_ops = {
1763 .start = carl9170_op_start,
1764 .stop = carl9170_op_stop,
1765 .tx = carl9170_op_tx,
1766 .flush = carl9170_op_flush,
1767 .add_interface = carl9170_op_add_interface,
1768 .remove_interface = carl9170_op_remove_interface,
1769 .config = carl9170_op_config,
1770 .prepare_multicast = carl9170_op_prepare_multicast,
1771 .configure_filter = carl9170_op_configure_filter,
1772 .conf_tx = carl9170_op_conf_tx,
1773 .bss_info_changed = carl9170_op_bss_info_changed,
1774 .get_tsf = carl9170_op_get_tsf,
1775 .set_key = carl9170_op_set_key,
1776 .sta_add = carl9170_op_sta_add,
1777 .sta_remove = carl9170_op_sta_remove,
1778 .sta_notify = carl9170_op_sta_notify,
1779 .get_survey = carl9170_op_get_survey,
1780 .get_stats = carl9170_op_get_stats,
1781 .ampdu_action = carl9170_op_ampdu_action,
1782 .tx_frames_pending = carl9170_tx_frames_pending,
1783 };
1784
1785 void *carl9170_alloc(size_t priv_size)
1786 {
1787 struct ieee80211_hw *hw;
1788 struct ar9170 *ar;
1789 struct sk_buff *skb;
1790 int i;
1791
1792 /*
1793 * This buffer is used for rx stream reconstruction.
1794 * Under heavy load this device (or the transport layer?)
1795 * tends to split the streams into separate rx descriptors.
1796 */
1797
1798 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1799 if (!skb)
1800 goto err_nomem;
1801
1802 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1803 if (!hw)
1804 goto err_nomem;
1805
1806 ar = hw->priv;
1807 ar->hw = hw;
1808 ar->rx_failover = skb;
1809
1810 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1811 ar->rx_has_plcp = false;
1812
1813 /*
1814 * Here's a hidden pitfall!
1815 *
1816 * All 4 AC queues work perfectly well under _legacy_ operation.
1817 * However, as soon as aggregation is enabled, the traffic flow
1818 * gets very bumpy. Therefore we have to _switch_ to a
1819 * software AC with a single HW queue.
1820 */
1821 hw->queues = __AR9170_NUM_TXQ;
1822
1823 mutex_init(&ar->mutex);
1824 spin_lock_init(&ar->beacon_lock);
1825 spin_lock_init(&ar->cmd_lock);
1826 spin_lock_init(&ar->tx_stats_lock);
1827 spin_lock_init(&ar->tx_ampdu_list_lock);
1828 spin_lock_init(&ar->mem_lock);
1829 spin_lock_init(&ar->state_lock);
1830 atomic_set(&ar->pending_restarts, 0);
1831 ar->vifs = 0;
1832 for (i = 0; i < ar->hw->queues; i++) {
1833 skb_queue_head_init(&ar->tx_status[i]);
1834 skb_queue_head_init(&ar->tx_pending[i]);
1835
1836 INIT_LIST_HEAD(&ar->bar_list[i]);
1837 spin_lock_init(&ar->bar_list_lock[i]);
1838 }
1839 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1840 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1841 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1842 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1843 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1844 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1845 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1846 rcu_assign_pointer(ar->tx_ampdu_iter,
1847 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1848
1849 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1850 INIT_LIST_HEAD(&ar->vif_list);
1851 init_completion(&ar->tx_flush);
1852
1853 /* firmware decides which modes we support */
1854 hw->wiphy->interface_modes = 0;
1855
1856 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1857 IEEE80211_HW_MFP_CAPABLE |
1858 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1859 IEEE80211_HW_SUPPORTS_PS |
1860 IEEE80211_HW_PS_NULLFUNC_STACK |
1861 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
1862 IEEE80211_HW_SUPPORTS_RC_TABLE |
1863 IEEE80211_HW_SIGNAL_DBM |
1864 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
1865
1866 if (!modparam_noht) {
1867 /*
1868 * See the comment above for why we allow the user
1869 * to disable HT via a module parameter.
1870 */
1871 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1872 }
1873
1874 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1875 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1876 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1877
1878 hw->max_rates = CARL9170_TX_MAX_RATES;
1879 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1880
1881 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1882 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1883
1884 return ar;
1885
1886 err_nomem:
1887 kfree_skb(skb);
1888 return ERR_PTR(-ENOMEM);
1889 }
1890
1891 static int carl9170_read_eeprom(struct ar9170 *ar)
1892 {
1893 #define RW 8 /* number of words to read at once */
1894 #define RB (sizeof(u32) * RW)
1895 u8 *eeprom = (void *)&ar->eeprom;
1896 __le32 offsets[RW];
1897 int i, j, err;
1898
1899 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1900
1901 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1902 #ifndef __CHECKER__
1903 /* don't want to handle trailing remains */
1904 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1905 #endif
1906
1907 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1908 for (j = 0; j < RW; j++)
1909 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1910 RB * i + 4 * j);
1911
1912 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1913 RB, (u8 *) &offsets,
1914 RB, eeprom + RB * i);
1915 if (err)
1916 return err;
1917 }
1918
1919 #undef RW
1920 #undef RB
1921 return 0;
1922 }
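/*
 * Illustrative note (not from the original source): carl9170_read_eeprom()
 * above pulls the EEPROM image in chunks of RW = 8 32-bit words
 * (RB = 32 bytes) per CARL9170_CMD_RREG command. For chunk i, the request
 * holds the eight offsets AR9170_EEPROM_START + 32 * i + 4 * j (j = 0..7)
 * and the reply is copied straight into &ar->eeprom at byte offset 32 * i.
 */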
1923
1924 static int carl9170_parse_eeprom(struct ar9170 *ar)
1925 {
1926 struct ath_regulatory *regulatory = &ar->common.regulatory;
1927 unsigned int rx_streams, tx_streams, tx_params = 0;
1928 int bands = 0;
1929 int chans = 0;
1930
1931 if (ar->eeprom.length == cpu_to_le16(0xffff))
1932 return -ENODATA;
1933
1934 rx_streams = hweight8(ar->eeprom.rx_mask);
1935 tx_streams = hweight8(ar->eeprom.tx_mask);
1936
1937 if (rx_streams != tx_streams) {
1938 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1939
1940 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1941 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1942
1943 tx_params = (tx_streams - 1) <<
1944 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1945
1946 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1947 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1948 }
1949
1950 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1951 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1952 &carl9170_band_2GHz;
1953 chans += carl9170_band_2GHz.n_channels;
1954 bands++;
1955 }
1956 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1957 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1958 &carl9170_band_5GHz;
1959 chans += carl9170_band_5GHz.n_channels;
1960 bands++;
1961 }
1962
1963 if (!bands)
1964 return -EINVAL;
1965
1966 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1967 if (!ar->survey)
1968 return -ENOMEM;
1969 ar->num_channels = chans;
1970
1971 /*
1972 * I measured this: a band switch takes roughly
1973 * 135 ms and a frequency switch about 80 ms.
1974 *
1975 * FIXME: measure these values again once EEPROM settings
1976 * are used; that will influence them!
1977 */
1978 if (bands == 2)
1979 ar->hw->channel_change_time = 135 * 1000;
1980 else
1981 ar->hw->channel_change_time = 80 * 1000;
1982
1983 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1984
1985 /* second part of wiphy init */
1986 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1987
1988 return 0;
1989 }
1990
1991 static void carl9170_reg_notifier(struct wiphy *wiphy,
1992 struct regulatory_request *request)
1993 {
1994 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1995 struct ar9170 *ar = hw->priv;
1996
1997 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1998 }
1999
2000 int carl9170_register(struct ar9170 *ar)
2001 {
2002 struct ath_regulatory *regulatory = &ar->common.regulatory;
2003 int err = 0, i;
2004
2005 if (WARN_ON(ar->mem_bitmap))
2006 return -EINVAL;
2007
2008 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
2009 sizeof(unsigned long), GFP_KERNEL);
2010
2011 if (!ar->mem_bitmap)
2012 return -ENOMEM;
2013
2014 /* try to read EEPROM, init MAC addr */
2015 err = carl9170_read_eeprom(ar);
2016 if (err)
2017 return err;
2018
2019 err = carl9170_parse_eeprom(ar);
2020 if (err)
2021 return err;
2022
2023 err = ath_regd_init(regulatory, ar->hw->wiphy,
2024 carl9170_reg_notifier);
2025 if (err)
2026 return err;
2027
2028 if (modparam_noht) {
2029 carl9170_band_2GHz.ht_cap.ht_supported = false;
2030 carl9170_band_5GHz.ht_cap.ht_supported = false;
2031 }
2032
2033 for (i = 0; i < ar->fw.vif_num; i++) {
2034 ar->vif_priv[i].id = i;
2035 ar->vif_priv[i].vif = NULL;
2036 }
2037
2038 err = ieee80211_register_hw(ar->hw);
2039 if (err)
2040 return err;
2041
2042 /* mac80211 interface is now registered */
2043 ar->registered = true;
2044
2045 if (!ath_is_world_regd(regulatory))
2046 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2047
2048 #ifdef CONFIG_CARL9170_DEBUGFS
2049 carl9170_debugfs_register(ar);
2050 #endif /* CONFIG_CARL9170_DEBUGFS */
2051
2052 err = carl9170_led_init(ar);
2053 if (err)
2054 goto err_unreg;
2055
2056 #ifdef CONFIG_CARL9170_LEDS
2057 err = carl9170_led_register(ar);
2058 if (err)
2059 goto err_unreg;
2060 #endif /* CONFIG_CARL9170_LEDS */
2061
2062 #ifdef CONFIG_CARL9170_WPC
2063 err = carl9170_register_wps_button(ar);
2064 if (err)
2065 goto err_unreg;
2066 #endif /* CONFIG_CARL9170_WPC */
2067
2068 #ifdef CONFIG_CARL9170_HWRNG
2069 err = carl9170_register_hwrng(ar);
2070 if (err)
2071 goto err_unreg;
2072 #endif /* CONFIG_CARL9170_HWRNG */
2073
2074 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2075 wiphy_name(ar->hw->wiphy));
2076
2077 return 0;
2078
2079 err_unreg:
2080 carl9170_unregister(ar);
2081 return err;
2082 }
2083
2084 void carl9170_unregister(struct ar9170 *ar)
2085 {
2086 if (!ar->registered)
2087 return;
2088
2089 ar->registered = false;
2090
2091 #ifdef CONFIG_CARL9170_LEDS
2092 carl9170_led_unregister(ar);
2093 #endif /* CONFIG_CARL9170_LEDS */
2094
2095 #ifdef CONFIG_CARL9170_DEBUGFS
2096 carl9170_debugfs_unregister(ar);
2097 #endif /* CONFIG_CARL9170_DEBUGFS */
2098
2099 #ifdef CONFIG_CARL9170_WPC
2100 if (ar->wps.pbc) {
2101 input_unregister_device(ar->wps.pbc);
2102 ar->wps.pbc = NULL;
2103 }
2104 #endif /* CONFIG_CARL9170_WPC */
2105
2106 #ifdef CONFIG_CARL9170_HWRNG
2107 carl9170_unregister_hwrng(ar);
2108 #endif /* CONFIG_CARL9170_HWRNG */
2109
2110 carl9170_cancel_worker(ar);
2111 cancel_work_sync(&ar->restart_work);
2112
2113 ieee80211_unregister_hw(ar->hw);
2114 }
2115
2116 void carl9170_free(struct ar9170 *ar)
2117 {
2118 WARN_ON(ar->registered);
2119 WARN_ON(IS_INITIALIZED(ar));
2120
2121 kfree_skb(ar->rx_failover);
2122 ar->rx_failover = NULL;
2123
2124 kfree(ar->mem_bitmap);
2125 ar->mem_bitmap = NULL;
2126
2127 kfree(ar->survey);
2128 ar->survey = NULL;
2129
2130 mutex_destroy(&ar->mutex);
2131
2132 ieee80211_free_hw(ar->hw);
2133 }