]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/wireless/ath/carl9170/main.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[mirror_ubuntu-bionic-kernel.git] / drivers / net / wireless / ath / carl9170 / main.c
1 /*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
50
51 static int modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54
/*
 * Module parameter: disable A-MPDU aggregation.  Checked in
 * carl9170_op_ampdu_action(); non-static, so presumably also read by
 * other compilation units of this driver (declared in a header —
 * TODO confirm).
 */
int modparam_noht;
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58
/*
 * Legacy rate table shared by both bands.  The hardware rate code is
 * stored in the low nibble of hw_value; the tx power index goes into
 * the high nibble.
 */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

struct ieee80211_rate __carl9170_ratetable[] = {
	/* CCK rates (2.4 GHz only) */
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	/* OFDM rates (valid on both bands) */
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

/* 2.4 GHz uses all 12 rates; 5 GHz skips the four leading CCK entries. */
#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8
85
/*
 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

static struct ieee80211_channel carl9170_2ghz_chantable[] = {
	CHAN(2412,  0),
	CHAN(2417,  1),
	CHAN(2422,  2),
	CHAN(2427,  3),
	CHAN(2432,  4),
	CHAN(2437,  5),
	CHAN(2442,  6),
	CHAN(2447,  7),
	CHAN(2452,  8),
	CHAN(2457,  9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

/*
 * 5 GHz table.  Note it is NOT sorted by frequency: the 4.9 GHz
 * entries come first and the 5170-5230 MHz channels are appended at
 * the end — the hw_value indices must stay aligned with phy.c, so do
 * not reorder.
 */
static struct ieee80211_channel carl9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
151
/*
 * HT capabilities advertised for both bands: 40 MHz with short GI,
 * max A-MSDU, 64k A-MPDUs with 8us density; rx_mask advertises
 * MCS 0-15 plus MCS 32 (bit 0 of the fifth octet).
 */
#define CARL9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}

static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels	= carl9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates	= carl9170_g_ratetable,
	.n_bitrates	= carl9170_g_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels	= carl9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates	= carl9170_a_ratetable,
	.n_bitrates	= carl9170_a_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};
184
/*
 * Garbage-collect aggregation sessions marked CARL9170_TID_STATE_SHUTDOWN:
 * unlink them from the active ampdu list under the list lock, wait out an
 * RCU grace period, then flush their queued frames and free them.
 * Runs in process context (worker / mutex-holding paths).
 */
static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	rcu_read_lock();
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			/* park on a private list until the grace period ends */
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

	}
	/*
	 * NOTE(review): tid_info is the loop cursor and points at the list
	 * head container once the loop terminates — presumably intentional,
	 * to reset the tx scheduler's round-robin iterator; confirm against
	 * the tx_ampdu_iter users in tx.c.
	 */
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();

	/* no RCU reader may still see the unlinked entries after this */
	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;
		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		/* report all frames still pending on the dead session */
		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}
219
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 {
222 if (drop_queued) {
223 int i;
224
225 /*
226 * We can only drop frames which have not been uploaded
227 * to the device yet.
228 */
229
230 for (i = 0; i < ar->hw->queues; i++) {
231 struct sk_buff *skb;
232
233 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234 struct ieee80211_tx_info *info;
235
236 info = IEEE80211_SKB_CB(skb);
237 if (info->flags & IEEE80211_TX_CTL_AMPDU)
238 atomic_dec(&ar->tx_ampdu_upload);
239
240 carl9170_tx_status(ar, skb, false);
241 }
242 }
243 }
244
245 /* Wait for all other outstanding frames to timeout. */
246 if (atomic_read(&ar->tx_total_queued))
247 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
248 }
249
/*
 * Suspend every active aggregation session and collect all frames that
 * were still waiting in the per-TID queues.  The skbs are gathered while
 * holding the locks and completed (unacked) after they are dropped.
 */
static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	rcu_read_lock();
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			/* nested under tx_ampdu_list_lock (already _bh) */
			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);
	rcu_read_unlock();

	/* complete the dropped frames without any lock held */
	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}
276
/*
 * Reset all tx bookkeeping to a pristine state: reap dead ampdu sessions,
 * drop queued/suspended frames, purge the status queues, discard pending
 * beacons and reset statistics and firmware memory accounting.
 * Called with ar->mutex held (start/stop/restart paths).
 */
static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			/* hold a reference while the queue lock is dropped */
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	/* compile-time sanity checks on the queue limit constants */
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	/* mark every firmware memory block as free */
	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	/* throw away any beacon buffers still attached to the vifs */
	rcu_read_lock();
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}
	rcu_read_unlock();

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}
329
/* Initialize one EDCA parameter record (QoS defaults in op_start). */
#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)
337
/*
 * mac80211 start() callback: reset driver state, open the USB transport,
 * program the MAC with defaults, wipe the hardware key cache and kick
 * off rx DMA.  The firmware command order below matters; on any failure
 * the device is simply left unstarted and the error is returned.
 */
static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023,  0); /* BEST EFFORT */
	CARL9170_FILL_QUEUE(ar->edcf[1], 2, 7,    15, 94); /* VIDEO */
	CARL9170_FILL_QUEUE(ar->edcf[2], 2, 3,     7, 47); /* VOICE */
	CARL9170_FILL_QUEUE(ar->edcf[3], 7, 15, 1023,  0); /* BACKGROUND */
	CARL9170_FILL_QUEUE(ar->edcf[4], 2, 3,     7,  0); /* SPECIAL */

	/* -1 forces a reprogram on the first aggregation setup */
	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;
	ar->rx_software_decryption = false;
	ar->disable_offload = false;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	/* default rx filter, when the firmware supports filtering at all */
	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	/* start rx DMA */
	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		/* wipe both key halves (index 0 and 1) of every slot */
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
423
/*
 * Synchronously cancel all deferred work this driver schedules.
 * Callers run this without ar->mutex held (see op_stop/restart_work),
 * since some of the cancelled workers take that mutex themselves.
 */
static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
}
434
/*
 * mac80211 stop() callback: move the device back to IDLE, stop DMA and
 * the USB transport, flush all tx state and cancel the workers.
 */
static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		RCU_INIT_POINTER(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop DMA */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	/* workers take ar->mutex; cancel them only after dropping it */
	carl9170_cancel_worker(ar);
}
459
460 static void carl9170_restart_work(struct work_struct *work)
461 {
462 struct ar9170 *ar = container_of(work, struct ar9170,
463 restart_work);
464 int err;
465
466 ar->usedkeys = 0;
467 ar->filter_state = 0;
468 carl9170_cancel_worker(ar);
469
470 mutex_lock(&ar->mutex);
471 err = carl9170_usb_restart(ar);
472 if (net_ratelimit()) {
473 if (err) {
474 dev_err(&ar->udev->dev, "Failed to restart device "
475 " (%d).\n", err);
476 } else {
477 dev_info(&ar->udev->dev, "device restarted "
478 "successfully.\n");
479 }
480 }
481
482 carl9170_zap_queues(ar);
483 mutex_unlock(&ar->mutex);
484 if (!err) {
485 ar->restart_counter++;
486 atomic_set(&ar->pending_restarts, 0);
487
488 ieee80211_restart_hw(ar->hw);
489 } else {
490 /*
491 * The reset was unsuccessful and the device seems to
492 * be dead. But there's still one option: a low-level
493 * usb subsystem reset...
494 */
495
496 carl9170_usb_reset(ar);
497 }
498 }
499
/*
 * Request a device restart for reason @r.  Surplus restart requests that
 * arrive while one is already pending are ignored; depending on device
 * state the restart is either queued as deferred work or escalated
 * straight to a USB reset.
 */
void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
{
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
		return;
	}

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	/*
	 * NOTE(review): with "||" the assignment happens even when one of
	 * the WARN_ONs fires (only warning diagnostics differ) — looks like
	 * "&&" may have been intended; confirm before changing, the current
	 * behavior merely records an out-of-range reason.
	 */
	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))
		ar->last_reason = r;

	if (!ar->registered)
		return;

	if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
		ieee80211_queue_work(ar->hw, &ar->restart_work);
	else
		carl9170_usb_reset(ar);

	/*
	 * At this point, the device instance might have vanished/disabled.
	 * So, don't put any code which access the ar9170 struct
	 * without proper protection.
	 */
}
536
537 static void carl9170_ping_work(struct work_struct *work)
538 {
539 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
540 int err;
541
542 if (!IS_STARTED(ar))
543 return;
544
545 mutex_lock(&ar->mutex);
546 err = carl9170_echo_test(ar, 0xdeadbeef);
547 if (err)
548 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
549 mutex_unlock(&ar->mutex);
550 }
551
/*
 * Program the MAC for the given main interface.  A NULL vif is only
 * expected while the device is not started (restart with no interfaces
 * left); in that case nothing is done.
 */
static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = &ar->common;
	int err;

	if (!vif) {
		WARN_ON_ONCE(IS_STARTED(ar));
		return 0;
	}

	memcpy(common->macaddr, vif->addr, ETH_ALEN);

	/*
	 * Hardware crypto offload is only used for station and AP modes
	 * (and can be disabled by module parameter); everything else
	 * falls back to software de-/encryption.
	 */
	if (modparam_nohwcrypt ||
	    ((vif->type != NL80211_IFTYPE_STATION) &&
	     (vif->type != NL80211_IFTYPE_AP))) {
		ar->rx_software_decryption = true;
		ar->disable_offload = true;
	}

	err = carl9170_set_operating_mode(ar);
	return err;
}
575
/*
 * mac80211 add_interface() callback.  Validates the new vif against the
 * current main interface (only certain mode combinations may coexist),
 * allocates a vif slot from the firmware bitmap and programs either the
 * main MAC or a virtual MAC for it.  Interfaces that survived a restart
 * skip the slot allocation and go straight to re-initialization.
 */
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);
	rcu_read_lock();
	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	main_vif = carl9170_get_main_vif(ar);

	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			/* a second station vif is the only allowed combo */
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			err = -EBUSY;
			rcu_read_unlock();

			goto unlock;

		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP))
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		default:
			rcu_read_unlock();
			goto unlock;
		}
	}

	/* grab a free slot in the firmware's vif table */
	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	if (vif_id < 0) {
		rcu_read_unlock();

		err = -ENOSPC;
		goto unlock;
	}

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;
	ar->vifs++;
	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	if (carl9170_get_main_vif(ar) == vif) {
		/* this vif becomes the main interface */
		rcu_assign_pointer(ar->beacon_iter, vif_priv);
		rcu_read_unlock();

		err = carl9170_init_interface(ar, vif);
		if (err)
			goto unlock;
	} else {
		rcu_read_unlock();
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);

		if (err)
			goto unlock;
	}

unlock:
	if (err && (vif_id >= 0)) {
		/* roll back the slot allocation done above */
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		ar->vifs--;
		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		synchronize_rcu();
	} else {
		/* with more than one vif, powersave has to stay off */
		if (ar->vifs > 1)
			ar->ps.off_override |= PS_OFF_VIF;

		mutex_unlock(&ar->mutex);
	}

	return err;
}
683
/*
 * mac80211 remove_interface() callback: unlink the vif, reprogram the
 * main MAC if the main interface went away, flush its beacon/CAB state
 * and release its firmware vif slot.
 */
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	ar->vifs--;

	rcu_read_lock();
	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	/* mac80211 should have disabled beaconing before removal */
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		rcu_read_unlock();

		if (ar->vifs) {
			/* promote the next vif to main interface */
			WARN_ON(carl9170_init_interface(ar,
					carl9170_get_main_vif(ar)));
		} else {
			carl9170_set_operating_mode(ar);
		}
	} else {
		rcu_read_unlock();

		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	/* back to a single vif: powersave may be used again */
	if (ar->vifs == 1)
		ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);

	synchronize_rcu();
}
745
/* Defer a powersave state re-evaluation to process context. */
void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}
750
751 /* caller must hold ar->mutex */
752 static int carl9170_ps_update(struct ar9170 *ar)
753 {
754 bool ps = false;
755 int err = 0;
756
757 if (!ar->ps.off_override)
758 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
759
760 if (ps != ar->ps.state) {
761 err = carl9170_powersave(ar, ps);
762 if (err)
763 return err;
764
765 if (ar->ps.state && !ps) {
766 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
767 ar->ps.last_action);
768 }
769
770 if (ps)
771 ar->ps.last_slept = jiffies;
772
773 ar->ps.last_action = jiffies;
774 ar->ps.state = ps;
775 }
776
777 return 0;
778 }
779
780 static void carl9170_ps_work(struct work_struct *work)
781 {
782 struct ar9170 *ar = container_of(work, struct ar9170,
783 ps_work);
784 mutex_lock(&ar->mutex);
785 if (IS_STARTED(ar))
786 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
787 mutex_unlock(&ar->mutex);
788 }
789
790
/*
 * mac80211 config() callback.  Currently only powersave and channel
 * changes are acted upon; listen interval, tx power and SM-PS changes
 * are accepted but not yet implemented (see TODOs).
 */
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.channel,
					   hw->conf.channel_type,
					   CARL9170_RFI_NONE);
		if (err)
			goto out;

		/* SIFS/ACK and RTS/CTS timing depend on the new channel */
		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
842
843 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
844 struct netdev_hw_addr_list *mc_list)
845 {
846 struct netdev_hw_addr *ha;
847 u64 mchash;
848
849 /* always get broadcast frames */
850 mchash = 1ULL << (0xff >> 2);
851
852 netdev_hw_addr_list_for_each(ha, mc_list)
853 mchash |= 1ULL << (ha->addr[5] >> 2);
854
855 return mchash;
856 }
857
/*
 * mac80211 configure_filter() callback: apply multicast hash, sniffer
 * mode and — when the firmware supports it — the hardware rx filter.
 */
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	/* ALLMULTI: open the hash completely */
	if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	/* translate mac80211 filter flags into firmware rx-filter bits */
	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		u32 rx_filter = 0;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}
914
915
/*
 * mac80211 bss_info_changed() callback.  Beacon-related changes are
 * handled for every vif; BSSID/association/slot-time/basic-rate changes
 * are only honored for the main interface.
 */
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		/* count how many active vifs want a beacon */
		vif_priv->enable_beacon = bss_conf->enable_beacon;
		rcu_read_lock();
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;

		}
		rcu_read_unlock();

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		/* non-main vifs inherit the main vif's beacon timing */
		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * Therefore a hard limit for the broadcast traffic should
		 * prevent false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
		if (err)
			goto out;
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * master interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}
1024
/*
 * Read the 64-bit hardware TSF via a firmware command.
 * Returns 0 (and warns) if the command fails.
 */
static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}
1040
/*
 * mac80211 set_key() callback.  Key-cache layout: slots 0-63 hold
 * pairwise keys (tracked in ar->usedkeys), slots 64+ hold group keys
 * indexed by keyidx.  When the cache is exhausted (or hw crypto is
 * unusable for this vif) we return -ENOSPC and flip the device into
 * software decryption.
 */
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/*
	 * We have to fall back to software encryption, whenever
	 * the user choose to participates in an IBSS or is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high througput speed in 802.11n networks.
	 */

	if (!is_main_vif(ar, vif))
		goto err_softw;

	/*
	 * While the hardware supports *catch-all* key, for offloading
	 * group-key en-/de-cryption. The way of how the hardware
	 * decides which keyId maps to which key, remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* map the cipher suite to the hardware encryption algorithm */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			/* group key: fixed slot 64 + keyidx, no sta binding */
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			/* pairwise key: grab the first free slot below 64 */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			/* second key half carries the TKIP MIC key */
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating MMIC
			 * of fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group keys must be wiped from the cache as well */
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}

		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	/* fall back to software crypto for everything received */
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}
1181
/*
 * mac80211 sta_add() callback: initialize per-station HT/aggregation
 * state.  Stations with an unsupported A-MPDU density simply never get
 * ht_sta set, which keeps HT transmission disabled for them.
 */
static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * Densities above 6 (16us MPDU spacing) are rejected,
			 * so no HT-Xmit for this station.
			 * NOTE(review): the original comment read "HW does
			 * support 16us AMPDU density", which contradicts the
			 * check — presumably a missing "not"; confirm.
			 */

			return 0;
		}

		for (i = 0; i < CARL9170_NUM_TID; i++)
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

		/* ampdu_factor is an exponent: max length = 2^(13+factor) */
		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}
1208
/*
 * mac80211 sta_remove() callback: tear down all of the station's
 * aggregation sessions by marking them SHUTDOWN and letting the
 * ampdu garbage collector free them.
 */
static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {

		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			/* don't resurrect sessions already past SHUTDOWN */
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}
1246
1247 static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1248 const struct ieee80211_tx_queue_params *param)
1249 {
1250 struct ar9170 *ar = hw->priv;
1251 int ret;
1252
1253 mutex_lock(&ar->mutex);
1254 if (queue < ar->hw->queues) {
1255 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1256 ret = carl9170_set_qos(ar);
1257 } else {
1258 ret = -EINVAL;
1259 }
1260
1261 mutex_unlock(&ar->mutex);
1262 return ret;
1263 }
1264
/*
 * Deferred worker: run the A-MPDU garbage collector.
 *
 * Queued (e.g. from the ampdu_action TX_STOP path) whenever detached
 * TID contexts need to be reclaimed outside of atomic context.
 */
static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	/* nothing to collect while the device isn't up */
	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
	mutex_unlock(&ar->mutex);
}
1277
/*
 * mac80211 callback: manage A-MPDU (aggregation) sessions.
 *
 * TX sessions are tracked per station and TID in sta_info->agg[];
 * the contexts are published via RCU and linked into the global
 * ar->tx_ampdu_list under tx_ampdu_list_lock. RX aggregation is
 * handled entirely by the hardware.
 */
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	/* HT (and with it aggregation) can be disabled via module param */
	if (modparam_noht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		rcu_read_lock();
		if (rcu_dereference(sta_info->agg[tid])) {
			/* a session for this TID already exists */
			rcu_read_unlock();
			return -EBUSY;
		}

		/* GFP_ATOMIC: we are inside an RCU read-side section */
		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		/* seed host/BAW/next sequence counters with the start SSN */
		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		/* publish the context: global list first, then per-sta slot */
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
		rcu_read_unlock();

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		rcu_assign_pointer(sta_info->agg[tid], NULL);
		rcu_read_unlock();

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		/* ampdu_work garbage-collects the now-detached context */
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;

		if (tid_info) {
			/* fresh block-ack window before going operational */
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();

		/* OPERATIONAL without a prior TX_START is a mac80211 bug */
		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1374
#ifdef CONFIG_CARL9170_WPC
/*
 * Register an input device for the WPS push-button, provided the
 * firmware advertises one. Returns 0 when no button is present.
 */
static int carl9170_register_wps_button(struct ar9170 *ar)
{
	struct input_dev *pbc;
	int err;

	if (!(ar->features & CARL9170_WPS_BUTTON))
		return 0;

	pbc = input_allocate_device();
	if (!pbc)
		return -ENOMEM;

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	pbc->name = ar->wps.name;
	pbc->phys = ar->wps.phys;
	pbc->id.bustype = BUS_USB;
	pbc->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(pbc, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(pbc);
	if (err) {
		/* never registered, so a plain free is the right teardown */
		input_free_device(pbc);
		return err;
	}

	ar->wps.pbc = pbc;
	return 0;
}
#endif /* CONFIG_CARL9170_WPC */
1411
1412 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1413 struct survey_info *survey)
1414 {
1415 struct ar9170 *ar = hw->priv;
1416 int err;
1417
1418 if (idx != 0)
1419 return -ENOENT;
1420
1421 mutex_lock(&ar->mutex);
1422 err = carl9170_get_noisefloor(ar);
1423 mutex_unlock(&ar->mutex);
1424 if (err)
1425 return err;
1426
1427 survey->channel = ar->channel;
1428 survey->filled = SURVEY_INFO_NOISE_DBM;
1429 survey->noise = ar->noise[0];
1430 return 0;
1431 }
1432
1433 static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1434 {
1435 struct ar9170 *ar = hw->priv;
1436 unsigned int vid;
1437
1438 mutex_lock(&ar->mutex);
1439 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1440 carl9170_flush_cab(ar, vid);
1441
1442 carl9170_flush(ar, drop);
1443 mutex_unlock(&ar->mutex);
1444 }
1445
1446 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1447 struct ieee80211_low_level_stats *stats)
1448 {
1449 struct ar9170 *ar = hw->priv;
1450
1451 memset(stats, 0, sizeof(*stats));
1452 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1453 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1454 return 0;
1455 }
1456
/*
 * mac80211 callback: a station entered or left powersave.
 *
 * On SLEEP, all frames still queued for the station — both in the
 * per-TID aggregation queues and in the driver's pending TX queues —
 * are handed back to mac80211 (as un-acked status), and all of the
 * station's aggregation sessions are suspended. On AWAKE, the
 * suspended sessions are resumed.
 */
static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head free;
	int i;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		/*
		 * Since the peer is no longer listening, we have to return
		 * as many SKBs as possible back to the mac80211 stack.
		 * It will deal with the retry procedure, once the peer
		 * has become available again.
		 *
		 * NB: Ideally, the driver should return the all frames in
		 * the correct, ascending order. However, I think that this
		 * functionality should be implemented in the stack and not
		 * here...
		 */

		/* collect frames here; status is reported after all locks
		 * have been dropped */
		__skb_queue_head_init(&free);

		if (sta->ht_cap.ht_supported) {
			rcu_read_lock();
			for (i = 0; i < CARL9170_NUM_TID; i++) {
				struct carl9170_sta_tid *tid_info;

				tid_info = rcu_dereference(sta_info->agg[i]);

				if (!tid_info)
					continue;

				/* park the aggregation session */
				spin_lock_bh(&ar->tx_ampdu_list_lock);
				if (tid_info->state >
				    CARL9170_TID_STATE_SUSPEND)
					tid_info->state =
						CARL9170_TID_STATE_SUSPEND;
				spin_unlock_bh(&ar->tx_ampdu_list_lock);

				/* drain the session's private queue */
				spin_lock_bh(&tid_info->lock);
				while ((skb = __skb_dequeue(&tid_info->queue)))
					__skb_queue_tail(&free, skb);
				spin_unlock_bh(&tid_info->lock);
			}
			rcu_read_unlock();
		}

		/* pull the station's frames out of every pending TX queue */
		for (i = 0; i < ar->hw->queues; i++) {
			spin_lock_bh(&ar->tx_pending[i].lock);
			skb_queue_walk_safe(&ar->tx_pending[i], skb, tmp) {
				struct _carl9170_tx_superframe *super;
				struct ieee80211_hdr *hdr;
				struct ieee80211_tx_info *info;

				super = (void *) skb->data;
				hdr = (void *) super->frame_data;

				/* only frames addressed to this station */
				if (compare_ether_addr(hdr->addr1, sta->addr))
					continue;

				__skb_unlink(skb, &ar->tx_pending[i]);

				/* keep the in-flight A-MPDU count accurate */
				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);
			}
			spin_unlock_bh(&ar->tx_pending[i].lock);
		}

		/* now report the collected TID-queue frames as not acked */
		while ((skb = __skb_dequeue(&free)))
			carl9170_tx_status(ar, skb, false);

		break;

	case STA_NOTIFY_AWAKE:
		if (!sta->ht_cap.ht_supported)
			return;

		/* resume all aggregation sessions suspended above */
		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);

			if (!tid_info)
				continue;

			if ((tid_info->state == CARL9170_TID_STATE_SUSPEND))
				tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();
		break;
	}
}
1558
/* mac80211 callback table; handed to ieee80211_alloc_hw() below */
static const struct ieee80211_ops carl9170_ops = {
	.start			= carl9170_op_start,
	.stop			= carl9170_op_stop,
	.tx			= carl9170_op_tx,
	.flush			= carl9170_op_flush,
	.add_interface		= carl9170_op_add_interface,
	.remove_interface	= carl9170_op_remove_interface,
	.config			= carl9170_op_config,
	.prepare_multicast	= carl9170_op_prepare_multicast,
	.configure_filter	= carl9170_op_configure_filter,
	.conf_tx		= carl9170_op_conf_tx,
	.bss_info_changed	= carl9170_op_bss_info_changed,
	.get_tsf		= carl9170_op_get_tsf,
	.set_key		= carl9170_op_set_key,
	.sta_add		= carl9170_op_sta_add,
	.sta_remove		= carl9170_op_sta_remove,
	.sta_notify		= carl9170_op_sta_notify,
	.get_survey		= carl9170_op_get_survey,
	.get_stats		= carl9170_op_get_stats,
	.ampdu_action		= carl9170_op_ampdu_action,
};
1580
/*
 * Allocate and pre-initialize a new device instance.
 *
 * @priv_size: size of the driver-private area to reserve via
 *	ieee80211_alloc_hw() (holds struct ar9170 plus bus-glue state).
 *
 * Returns the new ar9170 context, or ERR_PTR(-ENOMEM) on failure.
 * Only software state is set up here; no device I/O happens yet.
 */
void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);
	ar->vifs = 0;
	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->ping_work, carl9170_ping_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	/* the iterator initially points at the (empty) list head itself */
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/*
	 * Note:
	 * IBSS/ADHOC and AP mode are only enabled, if the firmware
	 * supports these modes. The code which will add the
	 * additional interface_modes is in fw.c.
	 */
	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_P2P_CLIENT);

	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_SIGNAL_DBM;

	if (!modparam_noht) {
		/*
		 * see the comment above, why we allow the user
		 * to disable HT by a module parameter.
		 */
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
	return ar;

err_nomem:
	/* kfree_skb(NULL) is a no-op, so this covers both failure paths */
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
1685
/*
 * Download the device's EEPROM image into ar->eeprom.
 *
 * Uses the firmware's register-read command to fetch RW 32-bit words
 * (RB bytes) per round trip; the BUILD_BUG_ONs guarantee the EEPROM
 * size divides evenly into these chunks.
 */
static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

	for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
		/* build the list of register addresses for this chunk */
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
						 RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}
1718
1719 static int carl9170_parse_eeprom(struct ar9170 *ar)
1720 {
1721 struct ath_regulatory *regulatory = &ar->common.regulatory;
1722 unsigned int rx_streams, tx_streams, tx_params = 0;
1723 int bands = 0;
1724
1725 if (ar->eeprom.length == cpu_to_le16(0xffff))
1726 return -ENODATA;
1727
1728 rx_streams = hweight8(ar->eeprom.rx_mask);
1729 tx_streams = hweight8(ar->eeprom.tx_mask);
1730
1731 if (rx_streams != tx_streams) {
1732 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1733
1734 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1735 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1736
1737 tx_params = (tx_streams - 1) <<
1738 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1739
1740 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1741 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1742 }
1743
1744 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1745 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1746 &carl9170_band_2GHz;
1747 bands++;
1748 }
1749 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1750 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1751 &carl9170_band_5GHz;
1752 bands++;
1753 }
1754
1755 /*
1756 * I measured this, a bandswitch takes roughly
1757 * 135 ms and a frequency switch about 80.
1758 *
1759 * FIXME: measure these values again once EEPROM settings
1760 * are used, that will influence them!
1761 */
1762 if (bands == 2)
1763 ar->hw->channel_change_time = 135 * 1000;
1764 else
1765 ar->hw->channel_change_time = 80 * 1000;
1766
1767 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1768 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
1769
1770 /* second part of wiphy init */
1771 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1772
1773 return bands ? 0 : -EINVAL;
1774 }
1775
1776 static int carl9170_reg_notifier(struct wiphy *wiphy,
1777 struct regulatory_request *request)
1778 {
1779 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1780 struct ar9170 *ar = hw->priv;
1781
1782 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1783 }
1784
/*
 * Second-stage setup: read and parse the EEPROM, initialize the
 * regulatory state and register the device with mac80211 plus all
 * auxiliary interfaces (debugfs, LEDs, WPS button).
 *
 * Returns 0 on success or a negative errno. On failure after
 * ieee80211_register_hw() succeeded, carl9170_unregister() is used
 * to roll back.
 * NOTE(review): earlier error returns leave ar->mem_bitmap allocated —
 * presumably reclaimed by carl9170_free(); verify against the caller.
 */
int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	/* must not be called twice on the same instance */
	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
				 sizeof(unsigned long), GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_fw_fix_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	/* honor the "noht" module parameter for both bands */
	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	carl9170_unregister(ar);
	return err;
}
1866
/*
 * Detach the device from mac80211 and tear down the auxiliary
 * registrations (LEDs, debugfs, WPS button).
 *
 * Idempotent: the ar->registered flag makes repeated calls a no-op,
 * so the error path of carl9170_register() can use it safely.
 */
void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

	/* make sure no deferred work is left running before unregistering */
	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}
1894
1895 void carl9170_free(struct ar9170 *ar)
1896 {
1897 WARN_ON(ar->registered);
1898 WARN_ON(IS_INITIALIZED(ar));
1899
1900 kfree_skb(ar->rx_failover);
1901 ar->rx_failover = NULL;
1902
1903 kfree(ar->mem_bitmap);
1904 ar->mem_bitmap = NULL;
1905
1906 mutex_destroy(&ar->mutex);
1907
1908 ieee80211_free_hw(ar->hw);
1909 }