]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - drivers/net/wireless/ath/ar9170/main.c
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[mirror_ubuntu-eoan-kernel.git] / drivers / net / wireless / ath / ar9170 / main.c
1 /*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <net/mac80211.h>
45 #include "ar9170.h"
46 #include "hw.h"
47 #include "cmd.h"
48
49 static int modparam_nohwcrypt;
50 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
51 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
52
53 static int modparam_ht;
54 module_param_named(ht, modparam_ht, bool, S_IRUGO);
55 MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
56
/*
 * RATE() builds one mac80211 bitrate entry: the hardware rate code is
 * packed into the low nibble of hw_value and the tx power index into
 * the high nibble (unpacked again by the tx path).
 */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

/* first four entries are CCK/DSSS (2 GHz only), the rest are OFDM */
static struct ieee80211_rate __ar9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE
78
/* 2.4 GHz uses the whole table; 5 GHz skips the four CCK/DSSS rates */
#define ar9170_g_ratetable	(__ar9170_ratetable + 0)
#define ar9170_g_ratetable_size	12
#define ar9170_a_ratetable	(__ar9170_ratetable + 4)
#define ar9170_a_ratetable_size	8
83
/*
 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

/* 2.4 GHz channels 1-14 */
static struct ieee80211_channel ar9170_2ghz_chantable[] = {
	CHAN(2412,  0),
	CHAN(2417,  1),
	CHAN(2422,  2),
	CHAN(2427,  3),
	CHAN(2432,  4),
	CHAN(2437,  5),
	CHAN(2442,  6),
	CHAN(2447,  7),
	CHAN(2452,  8),
	CHAN(2457,  9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};
110
/* 5 GHz channels; hw_value continues the phy_freq_params index space */
static struct ieee80211_channel ar9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
149
/*
 * Shared 802.11n capability template for both bands: 40 MHz + SGI40,
 * up to two spatial streams (MCS 0-15, plus MCS 32) and 64k A-MPDUs
 * (factor 3) with 8 usec minimum MPDU start spacing (density 6).
 */
#define AR9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_GRN_FLD |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= 3,						\
	.ampdu_density	= 6,						\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}
167
/* band descriptors registered with mac80211 (wiphy->bands) */
static struct ieee80211_supported_band ar9170_band_2GHz = {
	.channels	= ar9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(ar9170_2ghz_chantable),
	.bitrates	= ar9170_g_ratetable,
	.n_bitrates	= ar9170_g_ratetable_size,
	.ht_cap		= AR9170_HT_CAP,
};

static struct ieee80211_supported_band ar9170_band_5GHz = {
	.channels	= ar9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(ar9170_5ghz_chantable),
	.bitrates	= ar9170_a_ratetable,
	.n_bitrates	= ar9170_a_ratetable_size,
	.ht_cap		= AR9170_HT_CAP,
};
183
/* forward declarations - these tx entry points are defined later in this file */
static void ar9170_tx(struct ar9170 *ar);
static bool ar9170_tx_ampdu(struct ar9170 *ar);
186
187 static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
188 {
189 return le16_to_cpu(hdr->seq_ctrl) >> 4;
190 }
191
192 static inline u16 ar9170_get_seq(struct sk_buff *skb)
193 {
194 struct ar9170_tx_control *txc = (void *) skb->data;
195 return ar9170_get_seq_h((void *) txc->frame_data);
196 }
197
198 static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
199 {
200 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
201 }
202
203 static inline u16 ar9170_get_tid(struct sk_buff *skb)
204 {
205 struct ar9170_tx_control *txc = (void *) skb->data;
206 return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
207 }
208
/* advance a 12-bit 802.11 sequence number, wrapping at 4096;
 * the argument is fully parenthesized so complex expressions expand safely */
#define GET_NEXT_SEQ(seq)	(((seq) + 1) & 0x0fff)
#define GET_NEXT_SEQ_FROM_SKB(skb)	(GET_NEXT_SEQ(ar9170_get_seq(skb)))
211
#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
/* debug-only: dump one queued tx frame's descriptor and header fields */
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_tx_control *txc = (void *) skb->data;
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
	struct ieee80211_hdr *hdr = (void *) txc->frame_data;

	printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
			  "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
	       ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
	       le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
	       jiffies_to_msecs(arinfo->timeout - jiffies));
}

/*
 * debug-only: print every frame in @queue.
 * Caller must already hold the queue lock (hence the __ prefix).
 */
static void __ar9170_dump_txqueue(struct ar9170 *ar,
				  struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	int i = 0;

	printk(KERN_DEBUG "---[ cut here ]---\n");
	printk(KERN_DEBUG "%s: %d entries in queue.\n",
	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));

	skb_queue_walk(queue, skb) {
		printk(KERN_DEBUG "index:%d => \n", i++);
		ar9170_print_txheader(ar, skb);
	}
	/* sanity check: walked entries should match the queue's counter */
	if (i != skb_queue_len(queue))
		printk(KERN_DEBUG "WARNING: queue frame counter "
		       "mismatch %d != %d\n", skb_queue_len(queue), i);
	printk(KERN_DEBUG "---[ end ]---\n");
}
#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
248
#ifdef AR9170_QUEUE_DEBUG
/* debug-only: locked wrapper around __ar9170_dump_txqueue() */
static void ar9170_dump_txqueue(struct ar9170 *ar,
				struct sk_buff_head *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	__ar9170_dump_txqueue(ar, queue);
	spin_unlock_irqrestore(&queue->lock, flags);
}
#endif /* AR9170_QUEUE_DEBUG */
260
#ifdef AR9170_QUEUE_STOP_DEBUG
/*
 * debug-only: print the per-QoS-queue accounting.
 * Callers typically hold ar->tx_stats_lock (hence the __ prefix).
 */
static void __ar9170_dump_txstats(struct ar9170 *ar)
{
	int i;

	printk(KERN_DEBUG "%s: QoS queue stats\n",
	       wiphy_name(ar->hw->wiphy));

	for (i = 0; i < __AR9170_NUM_TXQ; i++)
		printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d "
		       " stopped:%d\n", wiphy_name(ar->hw->wiphy), i,
		       ar->tx_stats[i].limit, ar->tx_stats[i].len,
		       skb_queue_len(&ar->tx_status[i]),
		       ieee80211_queue_stopped(ar->hw, i));
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
277
#ifdef AR9170_TXAGG_DEBUG
/* debug-only: dump the A-MPDU tx status queue under its own lock */
static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
{
	unsigned long flags;

	spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
	printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
	       wiphy_name(ar->hw->wiphy));
	__ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
	spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
}

#endif /* AR9170_TXAGG_DEBUG */
291
/*
 * Move every frame whose tx timeout has elapsed from the head of
 * @queue onto @bin. Frames are queued in timeout order, so the scan
 * stops at the first unexpired frame.
 *
 * Takes @queue's lock itself; caller must guarantee exclusive access
 * for the _bin_ queue.
 */
static void ar9170_recycle_expired(struct ar9170 *ar,
				   struct sk_buff_head *queue,
				   struct sk_buff_head *bin)
{
	struct sk_buff *skb, *old = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	while ((skb = skb_peek(queue))) {
		struct ieee80211_tx_info *txinfo;
		struct ar9170_tx_info *arinfo;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *) txinfo->rate_driver_data;

		if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
			       "recycle \n", wiphy_name(ar->hw->wiphy),
			       jiffies, arinfo->timeout);
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
			__skb_unlink(skb, queue);
			__skb_queue_tail(bin, skb);
		} else {
			/* frames behind this one expire even later - done */
			break;
		}

		/* skb_peek() returned the same skb twice - queue is corrupt */
		if (unlikely(old == skb)) {
			/* bail out - queue is shot. */

			WARN_ON(1);
			break;
		}
		old = skb;
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}
331
/*
 * Report a completed (non-aggregated) frame back to mac80211.
 * @tx_status is one of the AR9170_TX_STATUS_* codes taken from the
 * firmware's tx status notification.
 */
static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			     u16 tx_status)
{
	struct ieee80211_tx_info *txinfo;
	unsigned int retries = 0;

	txinfo = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(txinfo);

	switch (tx_status) {
	case AR9170_TX_STATUS_RETRY:
		retries = 2;
		/* fall through - a retried frame was still ACKed */
	case AR9170_TX_STATUS_COMPLETE:
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
		break;

	case AR9170_TX_STATUS_FAILED:
		retries = ar->hw->conf.long_frame_max_tx_count;
		break;

	default:
		printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
		       wiphy_name(ar->hw->wiphy), tx_status);
		break;
	}

	txinfo->status.rates[0].count = retries + 1;
	/* strip the driver's tx descriptor before handing the skb back */
	skb_pull(skb, sizeof(struct ar9170_tx_control));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}
362
/*
 * The firmware does not report per-MPDU status for aggregated frames,
 * so fake success reports for queued A-MPDU frames: everything beyond
 * the AR9170_NUM_TX_STATUS backlog plus everything expired is reported
 * as ACKed, the affected mac80211 queues are woken and the tx path is
 * kicked again.
 */
static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
{
	struct sk_buff_head success;
	struct sk_buff *skb;
	unsigned int i;
	unsigned long queue_bitmap = 0;

	skb_queue_head_init(&success);

	/* cap the status backlog; oldest entries are reported first */
	while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
		__skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));

	ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);

#ifdef AR9170_TXAGG_DEBUG
	printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
	       wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
	__ar9170_dump_txqueue(ar, &success);
#endif /* AR9170_TXAGG_DEBUG */

	while ((skb = __skb_dequeue(&success))) {
		struct ieee80211_tx_info *txinfo;

		/* remember which queues had completions */
		queue_bitmap |= BIT(skb_get_queue_mapping(skb));

		txinfo = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(txinfo);

		/* pretend the frame made it on the first try */
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
		txinfo->status.rates[0].count = 1;

		skb_pull(skb, sizeof(struct ar9170_tx_control));
		ieee80211_tx_status_irqsafe(ar->hw, skb);
	}

	for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
#ifdef AR9170_QUEUE_STOP_DEBUG
		printk(KERN_DEBUG "%s: wake queue %d\n",
		       wiphy_name(ar->hw->wiphy), i);
		__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
		ieee80211_wake_queue(ar->hw, i);
	}

	if (queue_bitmap)
		ar9170_tx(ar);
}
410
/*
 * USB-completion path for an aggregated frame: arm the block-ack
 * timeout, park the skb on the A-MPDU status queue and, once the last
 * outstanding aggregate completed, schedule the next one.
 */
static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;

	arinfo->timeout = jiffies +
			  msecs_to_jiffies(AR9170_BA_TIMEOUT);

	skb_queue_tail(&ar->tx_status_ampdu, skb);
	ar9170_tx_fake_ampdu_status(ar);

	if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
	    !list_empty(&ar->tx_ampdu_list))
		ar9170_tx_ampdu(ar);
}
426
/*
 * Completion handler for a frame that was handed to the device.
 * Updates the per-queue accounting, wakes the mac80211 queue when it
 * drained below the soft limit, then either reports the frame right
 * away (no-ACK frames), defers it to the A-MPDU path, or parks it on
 * the status queue until the firmware's tx report arrives.
 */
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
	unsigned int queue = skb_get_queue_mapping(skb);
	unsigned long flags;

	spin_lock_irqsave(&ar->tx_stats_lock, flags);
	ar->tx_stats[queue].len--;

	if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
#ifdef AR9170_QUEUE_STOP_DEBUG
		printk(KERN_DEBUG "%s: wake queue %d\n",
		       wiphy_name(ar->hw->wiphy), queue);
		__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
		ieee80211_wake_queue(ar->hw, queue);
	}
	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
		/* no ACK expected -> no status report to wait for */
		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
	} else {
		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
			ar9170_tx_ampdu_callback(ar, skb);
		} else {
			/* wait for the firmware's tx status report */
			arinfo->timeout = jiffies +
				  msecs_to_jiffies(AR9170_TX_TIMEOUT);

			skb_queue_tail(&ar->tx_status[queue], skb);
		}
	}

	/* hardware queue ran dry while frames are still pending - refill */
	if (!ar->tx_stats[queue].len &&
	    !skb_queue_empty(&ar->tx_pending[queue])) {
		ar9170_tx(ar);
	}
}
465
/*
 * Find and unlink the queued frame a firmware status report most
 * likely refers to, matching on destination MAC (@mac, may be NULL to
 * skip the check) and tx rate (@rate, AR9170_TX_INVALID_RATE to skip).
 *
 * Returns the unlinked skb, or NULL if nothing in @queue matched.
 */
static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
					     const u8 *mac,
					     struct sk_buff_head *queue,
					     const u32 rate)
{
	unsigned long flags;
	struct sk_buff *skb;

	/*
	 * Unfortunately, the firmware does not tell to which (queued) frame
	 * this transmission status report belongs to.
	 *
	 * So we have to make risky guesses - with the scarce information
	 * the firmware provided (-> destination MAC, and phy_control) -
	 * and hope that we picked the right one...
	 */

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk(queue, skb) {
		struct ar9170_tx_control *txc = (void *) skb->data;
		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
		u32 r;

		if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
			       wiphy_name(ar->hw->wiphy), mac,
			       ieee80211_get_DA(hdr));
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
			continue;
		}

		/* rate index is encoded in the frame's phy_control word */
		r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
		    AR9170_TX_PHY_MCS_SHIFT;

		if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
			       wiphy_name(ar->hw->wiphy), rate, r);
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
			continue;
		}

		/* match found - unlink under the lock, then hand it out */
		__skb_unlink(skb, queue);
		spin_unlock_irqrestore(&queue->lock, flags);
		return skb;
	}

#ifdef AR9170_QUEUE_DEBUG
	printk(KERN_ERR "%s: ESS:[%pM] does not have any "
	       "outstanding frames in queue.\n",
	       wiphy_name(ar->hw->wiphy), mac);
	__ar9170_dump_txqueue(ar, queue);
#endif /* AR9170_QUEUE_DEBUG */
	spin_unlock_irqrestore(&queue->lock, flags);

	return NULL;
}
526
/*
 * Handle a firmware block-ack failure event: pull up to @count frames
 * that were sent at rate @r off the A-MPDU status queue and report
 * them to mac80211.
 */
static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;

	while (count) {
		skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
		if (!skb)
			break;

		txinfo = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(txinfo);

		/* FIXME: maybe more ? */
		txinfo->status.rates[0].count = 1;

		skb_pull(skb, sizeof(struct ar9170_tx_control));
		ieee80211_tx_status_irqsafe(ar->hw, skb);
		count--;
	}

#ifdef AR9170_TXAGG_DEBUG
	if (count) {
		printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
		       "suitable frames left in tx_status queue.\n",
		       wiphy_name(ar->hw->wiphy), count);

		ar9170_dump_tx_status_ampdu(ar);
	}
#endif /* AR9170_TXAGG_DEBUG */
}
558
/*
 * This worker tries to keep and maintain the tx_status queues,
 * so we can guarantee that incoming tx_status reports are
 * actually for a pending frame.
 */
564
/*
 * Periodic garbage collector (delayed work): drops expired frames
 * from the pending and status queues, pushes out fake A-MPDU status
 * reports, and re-arms itself while any queue still holds frames.
 */
static void ar9170_tx_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_janitor.work);
	struct sk_buff_head waste;
	unsigned int i;
	bool resched = false;

	/* device is gone or stopping - nothing left to clean up */
	if (unlikely(!IS_STARTED(ar)))
		return ;

	skb_queue_head_init(&waste);

	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
		       wiphy_name(ar->hw->wiphy), i);
		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
		ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */

		ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
		ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
		/* expired frames are simply dropped */
		skb_queue_purge(&waste);

		if (!skb_queue_empty(&ar->tx_status[i]) ||
		    !skb_queue_empty(&ar->tx_pending[i]))
			resched = true;
	}

	ar9170_tx_fake_ampdu_status(ar);

	if (!resched)
		return;

	ieee80211_queue_delayed_work(ar->hw,
				     &ar->tx_janitor,
				     msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
604
/*
 * Dispatch an incoming firmware response buffer. Types outside the
 * 0xc0..0xff hardware-event range are command completions and are
 * routed to the registered callback; the rest are handled here.
 */
void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
	struct ar9170_cmd_response *cmd = (void *) buf;

	if ((cmd->type & 0xc0) != 0xc0) {
		ar->callback_cmd(ar, len, buf);
		return;
	}

	/* hardware event handlers */
	switch (cmd->type) {
	case 0xc1: {
		/*
		 * TX status notification:
		 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
		 *
		 * XX always 81
		 * YY always 00
		 * M1-M6 is the MAC address
		 * R1-R4 is the transmit rate
		 * S1-S2 is the transmit status
		 */

		struct sk_buff *skb;
		u32 phy = le32_to_cpu(cmd->tx_status.rate);
		u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
			AR9170_TX_PHY_QOS_SHIFT;
#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
		       wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
#endif /* AR9170_QUEUE_DEBUG */

		/* guess which queued frame this report belongs to */
		skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
					    &ar->tx_status[q],
					    AR9170_TX_INVALID_RATE);
		if (unlikely(!skb))
			return ;

		ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
		break;
	}

	case 0xc0:
		/*
		 * pre-TBTT event
		 */
		if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
			ieee80211_queue_work(ar->hw, &ar->beacon_work);
		break;

	case 0xc2:
		/*
		 * (IBSS) beacon send notification
		 * bytes: 04 c2 XX YY B4 B3 B2 B1
		 *
		 * XX always 80
		 * YY always 00
		 * B1-B4 "should" be the number of send out beacons.
		 */
		break;

	case 0xc3:
		/* End of Atim Window */
		break;

	case 0xc4:
		/* BlockACK bitmap */
		break;

	case 0xc5:
		/* BlockACK events */
		ar9170_handle_block_ack(ar,
					le16_to_cpu(cmd->ba_fail_cnt.failed),
					le16_to_cpu(cmd->ba_fail_cnt.rate));
		ar9170_tx_fake_ampdu_status(ar);
		break;

	case 0xc6:
		/* Watchdog Interrupt */
		break;

	case 0xc9:
		/* retransmission issue / SIFS/EIFS collision ?! */
		break;

	/* firmware debug */
	case 0xca:
		printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
		break;
	case 0xcb:
		/* debug scalar: payload length selects the value's width */
		len -= 4;

		switch (len) {
		case 1:
			printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
			       *((char *)buf + 4));
			break;
		case 2:
			printk(KERN_DEBUG "ar9170 FW: u8: %#.4x\n",
			       le16_to_cpup((__le16 *)((char *)buf + 4)));
			break;
		case 4:
			printk(KERN_DEBUG "ar9170 FW: u8: %#.8x\n",
			       le32_to_cpup((__le32 *)((char *)buf + 4)));
			break;
		case 8:
			printk(KERN_DEBUG "ar9170 FW: u8: %#.16lx\n",
			       (unsigned long)le64_to_cpup(
					(__le64 *)((char *)buf + 4)));
			break;
		}
		break;
	case 0xcc:
		print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
				     (char *)buf + 4, len - 4);
		break;

	default:
		printk(KERN_INFO "received unhandled event %x\n", cmd->type);
		print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
		break;
	}
}
728
729 static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
730 {
731 memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
732 ar->rx_mpdu.has_plcp = false;
733 }
734
735 int ar9170_nag_limiter(struct ar9170 *ar)
736 {
737 bool print_message;
738
739 /*
740 * we expect all sorts of errors in promiscuous mode.
741 * don't bother with it, it's OK!
742 */
743 if (ar->sniffer_enabled)
744 return false;
745
746 /*
747 * only go for frequent errors! The hardware tends to
748 * do some stupid thing once in a while under load, in
749 * noisy environments or just for fun!
750 */
751 if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
752 print_message = true;
753 else
754 print_message = false;
755
756 /* reset threshold for "once in a while" */
757 ar->bad_hw_nagger = jiffies + HZ / 4;
758 return print_message;
759 }
760
761 static int ar9170_rx_mac_status(struct ar9170 *ar,
762 struct ar9170_rx_head *head,
763 struct ar9170_rx_macstatus *mac,
764 struct ieee80211_rx_status *status)
765 {
766 u8 error, decrypt;
767
768 BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
769 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
770
771 error = mac->error;
772 if (error & AR9170_RX_ERROR_MMIC) {
773 status->flag |= RX_FLAG_MMIC_ERROR;
774 error &= ~AR9170_RX_ERROR_MMIC;
775 }
776
777 if (error & AR9170_RX_ERROR_PLCP) {
778 status->flag |= RX_FLAG_FAILED_PLCP_CRC;
779 error &= ~AR9170_RX_ERROR_PLCP;
780
781 if (!(ar->filter_state & FIF_PLCPFAIL))
782 return -EINVAL;
783 }
784
785 if (error & AR9170_RX_ERROR_FCS) {
786 status->flag |= RX_FLAG_FAILED_FCS_CRC;
787 error &= ~AR9170_RX_ERROR_FCS;
788
789 if (!(ar->filter_state & FIF_FCSFAIL))
790 return -EINVAL;
791 }
792
793 decrypt = ar9170_get_decrypt_type(mac);
794 if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
795 decrypt != AR9170_ENC_ALG_NONE)
796 status->flag |= RX_FLAG_DECRYPTED;
797
798 /* ignore wrong RA errors */
799 error &= ~AR9170_RX_ERROR_WRONG_RA;
800
801 if (error & AR9170_RX_ERROR_DECRYPT) {
802 error &= ~AR9170_RX_ERROR_DECRYPT;
803 /*
804 * Rx decryption is done in place,
805 * the original data is lost anyway.
806 */
807
808 return -EINVAL;
809 }
810
811 /* drop any other error frames */
812 if (unlikely(error)) {
813 /* TODO: update netdevice's RX dropped/errors statistics */
814
815 if (ar9170_nag_limiter(ar))
816 printk(KERN_DEBUG "%s: received frame with "
817 "suspicious error code (%#x).\n",
818 wiphy_name(ar->hw->wiphy), error);
819
820 return -EINVAL;
821 }
822
823 status->band = ar->channel->band;
824 status->freq = ar->channel->center_freq;
825
826 switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
827 case AR9170_RX_STATUS_MODULATION_CCK:
828 if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
829 status->flag |= RX_FLAG_SHORTPRE;
830 switch (head->plcp[0]) {
831 case 0x0a:
832 status->rate_idx = 0;
833 break;
834 case 0x14:
835 status->rate_idx = 1;
836 break;
837 case 0x37:
838 status->rate_idx = 2;
839 break;
840 case 0x6e:
841 status->rate_idx = 3;
842 break;
843 default:
844 if (ar9170_nag_limiter(ar))
845 printk(KERN_ERR "%s: invalid plcp cck rate "
846 "(%x).\n", wiphy_name(ar->hw->wiphy),
847 head->plcp[0]);
848 return -EINVAL;
849 }
850 break;
851
852 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
853 case AR9170_RX_STATUS_MODULATION_OFDM:
854 switch (head->plcp[0] & 0xf) {
855 case 0xb:
856 status->rate_idx = 0;
857 break;
858 case 0xf:
859 status->rate_idx = 1;
860 break;
861 case 0xa:
862 status->rate_idx = 2;
863 break;
864 case 0xe:
865 status->rate_idx = 3;
866 break;
867 case 0x9:
868 status->rate_idx = 4;
869 break;
870 case 0xd:
871 status->rate_idx = 5;
872 break;
873 case 0x8:
874 status->rate_idx = 6;
875 break;
876 case 0xc:
877 status->rate_idx = 7;
878 break;
879 default:
880 if (ar9170_nag_limiter(ar))
881 printk(KERN_ERR "%s: invalid plcp ofdm rate "
882 "(%x).\n", wiphy_name(ar->hw->wiphy),
883 head->plcp[0]);
884 return -EINVAL;
885 }
886 if (status->band == IEEE80211_BAND_2GHZ)
887 status->rate_idx += 4;
888 break;
889
890 case AR9170_RX_STATUS_MODULATION_HT:
891 if (head->plcp[3] & 0x80)
892 status->flag |= RX_FLAG_40MHZ;
893 if (head->plcp[6] & 0x80)
894 status->flag |= RX_FLAG_SHORT_GI;
895
896 status->rate_idx = clamp(0, 75, head->plcp[6] & 0x7f);
897 status->flag |= RX_FLAG_HT;
898 break;
899
900 default:
901 if (ar9170_nag_limiter(ar))
902 printk(KERN_ERR "%s: invalid modulation\n",
903 wiphy_name(ar->hw->wiphy));
904 return -EINVAL;
905 }
906
907 return 0;
908 }
909
/*
 * Convert the PHY status tail of a received frame into mac80211's
 * antenna/signal/noise fields.
 */
static void ar9170_rx_phy_status(struct ar9170 *ar,
				 struct ar9170_rx_phystatus *phy,
				 struct ieee80211_rx_status *status)
{
	int i;

	BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);

	/* rssi == 0x80 appears to flag an unused antenna chain - TODO confirm */
	for (i = 0; i < 3; i++)
		if (phy->rssi[i] != 0x80)
			status->antenna |= BIT(i);

	/* post-process RSSI */
	for (i = 0; i < 7; i++)
		if (phy->rssi[i] & 0x80)
			phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;

	/* TODO: we could do something with phy_errors */
	status->signal = ar->noise[0] + phy->rssi_combined;
	status->noise = ar->noise[0];
}
931
/*
 * Copy a received frame into a freshly allocated skb, reserving
 * headroom so that the payload behind the (variable length) 802.11
 * header ends up NET_IP_ALIGN'ed for the network stack.
 *
 * Returns NULL on allocation failure.
 */
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
{
	struct sk_buff *skb;
	int reserved = 0;
	struct ieee80211_hdr *hdr = (void *) buf;

	/* QoS control (and a possible A-MSDU) shifts the payload by 2 each */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		reserved += NET_IP_ALIGN;

		if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
			reserved += NET_IP_ALIGN;
	}

	/* 4-address frames carry an extra 6-byte address field */
	if (ieee80211_has_a4(hdr->frame_control))
		reserved += NET_IP_ALIGN;

	/* 32 bytes of headroom plus the net 2-byte alignment fix-up */
	reserved = 32 + (reserved & NET_IP_ALIGN);

	skb = dev_alloc_skb(len + reserved);
	if (likely(skb)) {
		skb_reserve(skb, reserved);
		memcpy(skb_put(skb, len), buf, len);
	}

	return skb;
}
959
960 /*
961 * If the frame alignment is right (or the kernel has
962 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
963 * is only a single MPDU in the USB frame, then we could
964 * submit to mac80211 the SKB directly. However, since
965 * there may be multiple packets in one SKB in stream
966 * mode, and we need to observe the proper ordering,
967 * this is non-trivial.
968 */
969
/*
 * Process one MPDU extracted from the USB rx stream.
 *
 * Depending on the MPDU tag (first/middle/last/single) the buffer
 * carries an ar9170_rx_head (PLCP) prefix and/or an ar9170_rx_phystatus
 * tail around the 802.11 frame; the MAC status trailer is always there.
 * The PLCP header of a "first" MPDU is cached in ar->rx_mpdu for the
 * middle/last parts of the same aggregate.
 */
static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
	struct ar9170_rx_head *head;
	struct ar9170_rx_macstatus *mac;
	struct ar9170_rx_phystatus *phy = NULL;
	struct ieee80211_rx_status status;
	struct sk_buff *skb;
	int mpdu_len;

	if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
		return ;

	/* Received MPDU */
	mpdu_len = len - sizeof(*mac);

	mac = (void *)(buf + mpdu_len);
	if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
		/* this frame is too damaged and can't be used - drop it */

		return ;
	}

	switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
	case AR9170_RX_STATUS_MPDU_FIRST:
		/* first mpdu packet has the plcp header */
		if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
			head = (void *) buf;
			memcpy(&ar->rx_mpdu.plcp, (void *) buf,
			       sizeof(struct ar9170_rx_head));

			mpdu_len -= sizeof(struct ar9170_rx_head);
			buf += sizeof(struct ar9170_rx_head);
			ar->rx_mpdu.has_plcp = true;
		} else {
			if (ar9170_nag_limiter(ar))
				printk(KERN_ERR "%s: plcp info is clipped.\n",
				       wiphy_name(ar->hw->wiphy));
			return ;
		}
		break;

	case AR9170_RX_STATUS_MPDU_LAST:
		/* last mpdu has a extra tail with phy status information */

		if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
			mpdu_len -= sizeof(struct ar9170_rx_phystatus);
			phy = (void *)(buf + mpdu_len);
		} else {
			if (ar9170_nag_limiter(ar))
				printk(KERN_ERR "%s: frame tail is clipped.\n",
				       wiphy_name(ar->hw->wiphy));
			return ;
		}

		/* fall through - the last mpdu also needs the cached plcp */
	case AR9170_RX_STATUS_MPDU_MIDDLE:
		/* middle mpdus are just data */
		if (unlikely(!ar->rx_mpdu.has_plcp)) {
			if (!ar9170_nag_limiter(ar))
				return ;

			printk(KERN_ERR "%s: rx stream did not start "
			       "with a first_mpdu frame tag.\n",
			       wiphy_name(ar->hw->wiphy));

			return ;
		}

		head = &ar->rx_mpdu.plcp;
		break;

	case AR9170_RX_STATUS_MPDU_SINGLE:
		/* single mpdu - has plcp (head) and phy status (tail) */
		head = (void *) buf;

		mpdu_len -= sizeof(struct ar9170_rx_head);
		mpdu_len -= sizeof(struct ar9170_rx_phystatus);

		buf += sizeof(struct ar9170_rx_head);
		phy = (void *)(buf + mpdu_len);
		break;

	default:
		BUG_ON(1);
		break;
	}

	/* drop runt frames */
	if (unlikely(mpdu_len < FCS_LEN))
		return ;

	memset(&status, 0, sizeof(status));
	if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
		return ;

	if (phy)
		ar9170_rx_phy_status(ar, phy, &status);

	skb = ar9170_rx_copy_data(buf, mpdu_len);
	if (likely(skb)) {
		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
		ieee80211_rx_irqsafe(ar->hw, skb);
	}
}
1072
1073 void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
1074 {
1075 unsigned int i, tlen, resplen, wlen = 0, clen = 0;
1076 u8 *tbuf, *respbuf;
1077
1078 tbuf = skb->data;
1079 tlen = skb->len;
1080
1081 while (tlen >= 4) {
1082 clen = tbuf[1] << 8 | tbuf[0];
1083 wlen = ALIGN(clen, 4);
1084
1085 /* check if this is stream has a valid tag.*/
1086 if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
1087 /*
1088 * TODO: handle the highly unlikely event that the
1089 * corrupted stream has the TAG at the right position.
1090 */
1091
1092 /* check if the frame can be repaired. */
1093 if (!ar->rx_failover_missing) {
1094 /* this is no "short read". */
1095 if (ar9170_nag_limiter(ar)) {
1096 printk(KERN_ERR "%s: missing tag!\n",
1097 wiphy_name(ar->hw->wiphy));
1098 goto err_telluser;
1099 } else
1100 goto err_silent;
1101 }
1102
1103 if (ar->rx_failover_missing > tlen) {
1104 if (ar9170_nag_limiter(ar)) {
1105 printk(KERN_ERR "%s: possible multi "
1106 "stream corruption!\n",
1107 wiphy_name(ar->hw->wiphy));
1108 goto err_telluser;
1109 } else
1110 goto err_silent;
1111 }
1112
1113 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
1114 ar->rx_failover_missing -= tlen;
1115
1116 if (ar->rx_failover_missing <= 0) {
				/*
				 * nested ar9170_rx call!
				 * termination is guaranteed, even when the
				 * combined frame also contains an element
				 * with a bad tag.
				 */
1123
1124 ar->rx_failover_missing = 0;
1125 ar9170_rx(ar, ar->rx_failover);
1126
1127 skb_reset_tail_pointer(ar->rx_failover);
1128 skb_trim(ar->rx_failover, 0);
1129 }
1130
1131 return ;
1132 }
1133
1134 /* check if stream is clipped */
1135 if (wlen > tlen - 4) {
1136 if (ar->rx_failover_missing) {
1137 /* TODO: handle double stream corruption. */
1138 if (ar9170_nag_limiter(ar)) {
1139 printk(KERN_ERR "%s: double rx stream "
1140 "corruption!\n",
1141 wiphy_name(ar->hw->wiphy));
1142 goto err_telluser;
1143 } else
1144 goto err_silent;
1145 }
1146
1147 /*
1148 * save incomplete data set.
1149 * the firmware will resend the missing bits when
1150 * the rx - descriptor comes round again.
1151 */
1152
1153 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
1154 ar->rx_failover_missing = clen - tlen;
1155 return ;
1156 }
1157 resplen = clen;
1158 respbuf = tbuf + 4;
1159 tbuf += wlen + 4;
1160 tlen -= wlen + 4;
1161
1162 i = 0;
1163
1164 /* weird thing, but this is the same in the original driver */
1165 while (resplen > 2 && i < 12 &&
1166 respbuf[0] == 0xff && respbuf[1] == 0xff) {
1167 i += 2;
1168 resplen -= 2;
1169 respbuf += 2;
1170 }
1171
1172 if (resplen < 4)
1173 continue;
1174
1175 /* found the 6 * 0xffff marker? */
1176 if (i == 12)
1177 ar9170_handle_command_response(ar, respbuf, resplen);
1178 else
1179 ar9170_handle_mpdu(ar, respbuf, clen);
1180 }
1181
1182 if (tlen) {
1183 if (net_ratelimit())
1184 printk(KERN_ERR "%s: %d bytes of unprocessed "
1185 "data left in rx stream!\n",
1186 wiphy_name(ar->hw->wiphy), tlen);
1187
1188 goto err_telluser;
1189 }
1190
1191 return ;
1192
1193 err_telluser:
1194 printk(KERN_ERR "%s: damaged RX stream data [want:%d, "
1195 "data:%d, rx:%d, pending:%d ]\n",
1196 wiphy_name(ar->hw->wiphy), clen, wlen, tlen,
1197 ar->rx_failover_missing);
1198
1199 if (ar->rx_failover_missing)
1200 print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
1201 ar->rx_failover->data,
1202 ar->rx_failover->len);
1203
1204 print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
1205 skb->data, skb->len);
1206
1207 printk(KERN_ERR "%s: please check your hardware and cables, if "
1208 "you see this message frequently.\n",
1209 wiphy_name(ar->hw->wiphy));
1210
1211 err_silent:
1212 if (ar->rx_failover_missing) {
1213 skb_reset_tail_pointer(ar->rx_failover);
1214 skb_trim(ar->rx_failover, 0);
1215 ar->rx_failover_missing = 0;
1216 }
1217 }
1218
/* initialize one EDCA parameter record: AIFS, CW bounds and TXOP */
#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)
1226
1227 static int ar9170_op_start(struct ieee80211_hw *hw)
1228 {
1229 struct ar9170 *ar = hw->priv;
1230 int err, i;
1231
1232 mutex_lock(&ar->mutex);
1233
1234 /* reinitialize queues statistics */
1235 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
1236 for (i = 0; i < __AR9170_NUM_TXQ; i++)
1237 ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
1238
1239 /* reset QoS defaults */
1240 AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
1241 AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */
1242 AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */
1243 AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */
1244 AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
1245
1246 /* set sane AMPDU defaults */
1247 ar->global_ampdu_density = 6;
1248 ar->global_ampdu_factor = 3;
1249
1250 atomic_set(&ar->tx_ampdu_pending, 0);
1251 ar->bad_hw_nagger = jiffies;
1252
1253 err = ar->open(ar);
1254 if (err)
1255 goto out;
1256
1257 err = ar9170_init_mac(ar);
1258 if (err)
1259 goto out;
1260
1261 err = ar9170_set_qos(ar);
1262 if (err)
1263 goto out;
1264
1265 err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
1266 if (err)
1267 goto out;
1268
1269 err = ar9170_init_rf(ar);
1270 if (err)
1271 goto out;
1272
1273 /* start DMA */
1274 err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
1275 if (err)
1276 goto out;
1277
1278 ar->state = AR9170_STARTED;
1279
1280 out:
1281 mutex_unlock(&ar->mutex);
1282 return err;
1283 }
1284
1285 static void ar9170_op_stop(struct ieee80211_hw *hw)
1286 {
1287 struct ar9170 *ar = hw->priv;
1288 unsigned int i;
1289
1290 if (IS_STARTED(ar))
1291 ar->state = AR9170_IDLE;
1292
1293 cancel_delayed_work_sync(&ar->tx_janitor);
1294 #ifdef CONFIG_AR9170_LEDS
1295 cancel_delayed_work_sync(&ar->led_work);
1296 #endif
1297 cancel_work_sync(&ar->beacon_work);
1298
1299 mutex_lock(&ar->mutex);
1300
1301 if (IS_ACCEPTING_CMD(ar)) {
1302 ar9170_set_leds_state(ar, 0);
1303
1304 /* stop DMA */
1305 ar9170_write_reg(ar, 0x1c3d30, 0);
1306 ar->stop(ar);
1307 }
1308
1309 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1310 skb_queue_purge(&ar->tx_pending[i]);
1311 skb_queue_purge(&ar->tx_status[i]);
1312 }
1313 skb_queue_purge(&ar->tx_status_ampdu);
1314
1315 mutex_unlock(&ar->mutex);
1316 }
1317
1318 static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
1319 {
1320 struct ar9170_tx_control *txc = (void *) skb->data;
1321
1322 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
1323 }
1324
1325 static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
1326 struct sk_buff *src)
1327 {
1328 struct ar9170_tx_control *dst_txc, *src_txc;
1329 struct ieee80211_tx_info *dst_info, *src_info;
1330 struct ar9170_tx_info *dst_arinfo, *src_arinfo;
1331
1332 src_txc = (void *) src->data;
1333 src_info = IEEE80211_SKB_CB(src);
1334 src_arinfo = (void *) src_info->rate_driver_data;
1335
1336 dst_txc = (void *) dst->data;
1337 dst_info = IEEE80211_SKB_CB(dst);
1338 dst_arinfo = (void *) dst_info->rate_driver_data;
1339
1340 dst_txc->phy_control = src_txc->phy_control;
1341
1342 /* same MCS for the whole aggregate */
1343 memcpy(dst_info->driver_rates, src_info->driver_rates,
1344 sizeof(dst_info->driver_rates));
1345 }
1346
/*
 * Prepend the ar9170_tx_control descriptor to an outgoing frame and
 * fill in its MAC-control fields: hardware queue, cipher type,
 * RTS/CTS protection and ACK policy.  The PHY fields are completed
 * later by ar9170_tx_prepare_phy()/ar9170_tx_copy_phy().
 *
 * Returns 0 on success, or -EINVAL (with the descriptor popped off
 * again) when the hw-key algorithm is unknown or an A-MPDU frame
 * arrives without a station context.
 */
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ar9170_tx_control *txc;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_rate *txrate;
	struct ar9170_tx_info *arinfo;
	unsigned int queue = skb_get_queue_mapping(skb);
	u16 keytype = 0;
	u16 len, icv = 0;

	/* the driver's private tx state must fit mac80211's scratch area */
	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	hdr = (void *)skb->data;
	info = IEEE80211_SKB_CB(skb);
	len = skb->len;

	txc = (void *)skb_push(skb, sizeof(*txc));

	if (info->control.hw_key) {
		icv = info->control.hw_key->icv_len;

		switch (info->control.hw_key->alg) {
		case ALG_WEP:
			keytype = AR9170_TX_MAC_ENCR_RC4;
			break;
		case ALG_TKIP:
			keytype = AR9170_TX_MAC_ENCR_RC4;
			break;
		case ALG_CCMP:
			keytype = AR9170_TX_MAC_ENCR_AES;
			break;
		default:
			WARN_ON(1);
			goto err_out;
		}
	}

	/* Length: payload + ICV + FCS */
	txc->length = cpu_to_le16(len + icv + 4);

	txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
				       AR9170_TX_MAC_BACKOFF);
	txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
					AR9170_TX_MAC_QOS_SHIFT);
	txc->mac_control |= cpu_to_le16(keytype);
	txc->phy_control = cpu_to_le32(0);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

	txrate = &info->control.rates[0];
	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);

	/* stamp the frame for the tx janitor's timeout accounting */
	arinfo = (void *)info->rate_driver_data;
	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	     (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
		/*
		 * WARNING:
		 * Putting the QoS queue bits into an unexplored territory is
		 * certainly not elegant.
		 *
		 * In my defense: This idea provides a reasonable way to
		 * smuggle valuable information to the tx_status callback.
		 * Also, the idea behind this bit-abuse came straight from
		 * the original driver code.
		 */

		txc->phy_control |=
			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);

		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
			if (unlikely(!info->control.sta))
				goto err_out;

			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
		} else {
			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
		}
	}

	return 0;

err_out:
	/* undo the skb_push() so the caller gets the frame back untouched */
	skb_pull(skb, sizeof(*txc));
	return -EINVAL;
}
1439
/*
 * Complete the PHY-control word of the tx descriptor: preamble/GI/
 * bandwidth flags, modulation, rate index, TX power and chain mask,
 * all derived from mac80211's first rate-control entry.
 */
static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_tx_control *txc;
	struct ieee80211_tx_info *info;
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_tx_rate *txrate;
	u32 power, chains;

	txc = (void *) skb->data;
	info = IEEE80211_SKB_CB(skb);
	txrate = &info->control.rates[0];

	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);

	if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
	/* this works because 40 MHz is 2 and dup is 3 */
	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		/* HT (MCS) transmission */
		u32 r = txrate->idx;
		u8 *txpower;

		/* heavy clip control */
		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);

		r <<= AR9170_TX_PHY_MCS_SHIFT;
		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);

		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/* pick the calibrated power table for this band/bandwidth */
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			if (info->band == IEEE80211_BAND_5GHZ)
				txpower = ar->power_5G_ht40;
			else
				txpower = ar->power_2G_ht40;
		} else {
			if (info->band == IEEE80211_BAND_5GHZ)
				txpower = ar->power_5G_ht20;
			else
				txpower = ar->power_2G_ht20;
		}

		power = txpower[(txrate->idx) & 7];
	} else {
		/* legacy CCK/OFDM transmission */
		u8 *txpower;
		u32 mod;
		u32 phyrate;
		u8 idx = txrate->idx;

		if (info->band != IEEE80211_BAND_2GHZ) {
			/* 5 GHz has no CCK rates: skip the 4 CCK entries
			 * of the rate table */
			idx += 4;
			txpower = ar->power_5G_leg;
			mod = AR9170_TX_PHY_MOD_OFDM;
		} else {
			if (idx < 4) {
				txpower = ar->power_2G_cck;
				mod = AR9170_TX_PHY_MOD_CCK;
			} else {
				mod = AR9170_TX_PHY_MOD_OFDM;
				txpower = ar->power_2G_ofdm;
			}
		}

		rate = &__ar9170_ratetable[idx];

		/* hw_value packs rate code (low nibble) and power index */
		phyrate = rate->hw_value & 0xF;
		power = txpower[(rate->hw_value & 0x30) >> 4];
		phyrate <<= AR9170_TX_PHY_MCS_SHIFT;

		txc->phy_control |= cpu_to_le32(mod);
		txc->phy_control |= cpu_to_le32(phyrate);
	}

	power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
	power &= AR9170_TX_PHY_TX_PWR_MASK;
	txc->phy_control |= cpu_to_le32(power);

	/* set TX chains */
	if (ar->eeprom.tx_mask == 1) {
		chains = AR9170_TX_PHY_TXCHAIN_1;
	} else {
		chains = AR9170_TX_PHY_TXCHAIN_2;

		/* >= 36M legacy OFDM - use only one chain */
		if (rate && rate->bitrate >= 360)
			chains = AR9170_TX_PHY_TXCHAIN_1;
	}
	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
1539
/*
 * Build aggregates: walk the active BlockACK sessions on
 * ar->tx_ampdu_list and, per TID, pull frames with consecutive
 * sequence numbers off the session's reorder queue into one A-MPDU,
 * which is then spliced onto the matching tx_pending queue.
 *
 * Locking: tx_ampdu_list_lock is held across the list walk but is
 * dropped (and retaken) around the splice into tx_pending; the
 * per-TID queue lock (f2) nests inside it.
 *
 * Returns true when at least one aggregate was handed over, i.e. the
 * caller should kick ar9170_tx().
 */
static bool ar9170_tx_ampdu(struct ar9170 *ar)
{
	struct sk_buff_head agg;
	struct ar9170_sta_tid *tid_info = NULL, *tmp;
	struct sk_buff *skb, *first = NULL;
	unsigned long flags, f2;
	unsigned int i = 0;
	u16 seq, queue, tmpssn;
	bool run = false;

	skb_queue_head_init(&agg);

	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
	if (list_empty(&ar->tx_ampdu_list)) {
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: aggregation list is empty.\n",
		       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
		goto out_unlock;
	}

	list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
		if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
			       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
			continue;
		}

		/* bound the work done per invocation */
		if (++i > 64) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: enough frames aggregated.\n",
			       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
			break;
		}

		queue = TID_TO_WME_AC(tid_info->tid);

		if (skb_queue_len(&ar->tx_pending[queue]) >=
		    AR9170_NUM_TX_AGG_MAX) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: queue %d full.\n",
			       wiphy_name(ar->hw->wiphy), queue);
#endif /* AR9170_TXAGG_DEBUG */
			continue;
		}

		list_del_init(&tid_info->list);

		spin_lock_irqsave(&tid_info->queue.lock, f2);
		tmpssn = seq = tid_info->ssn;
		first = skb_peek(&tid_info->queue);

		if (likely(first))
			tmpssn = ar9170_get_seq(first);

		/* resync ssn with the head of the queue if they diverged */
		if (unlikely(tmpssn != seq)) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
			       wiphy_name(ar->hw->wiphy), seq, tmpssn);
#endif /* AR9170_TXAGG_DEBUG */
			tid_info->ssn = tmpssn;
		}

#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
		       "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
		       tid_info->tid, tid_info->ssn,
		       skb_queue_len(&tid_info->queue));
		__ar9170_dump_txqueue(ar, &tid_info->queue);
#endif /* AR9170_TXAGG_DEBUG */

		/* pull frames for as long as the sequence numbers run on */
		while ((skb = skb_peek(&tid_info->queue))) {
			if (unlikely(ar9170_get_seq(skb) != seq))
				break;

			__skb_unlink(skb, &tid_info->queue);
			tid_info->ssn = seq = GET_NEXT_SEQ(seq);

			if (unlikely(skb_get_queue_mapping(skb) != queue)) {
#ifdef AR9170_TXAGG_DEBUG
				printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
				       "!match.\n", wiphy_name(ar->hw->wiphy),
				       tid_info->tid,
				       TID_TO_WME_AC(tid_info->tid),
				       skb_get_queue_mapping(skb));
#endif /* AR9170_TXAGG_DEBUG */
				dev_kfree_skb_any(skb);
				continue;
			}

			if (unlikely(first == skb)) {
				ar9170_tx_prepare_phy(ar, skb);
				__skb_queue_tail(&agg, skb);
				first = skb;
			} else {
				/* subframes reuse the leader's PHY setup */
				ar9170_tx_copy_phy(ar, skb, first);
				__skb_queue_tail(&agg, skb);
			}

			if (unlikely(skb_queue_len(&agg) ==
				     AR9170_NUM_TX_AGG_MAX))
				break;
		}

		if (skb_queue_empty(&tid_info->queue))
			tid_info->active = false;
		else
			list_add_tail(&tid_info->list,
				      &ar->tx_ampdu_list);

		spin_unlock_irqrestore(&tid_info->queue.lock, f2);

		if (unlikely(skb_queue_empty(&agg))) {
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_DEBUG "%s: queued empty list!\n",
			       wiphy_name(ar->hw->wiphy));
#endif /* AR9170_TXAGG_DEBUG */
			continue;
		}

		/*
		 * tell the FW/HW that this is the last frame,
		 * that way it will wait for the immediate block ack.
		 */
		ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));

#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
		       wiphy_name(ar->hw->wiphy));
		__ar9170_dump_txqueue(ar, &agg);
#endif /* AR9170_TXAGG_DEBUG */

		/* drop the list lock for the splice to respect lock order */
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);

		spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
		run = true;

		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
	__skb_queue_purge(&agg);

	return run;
}
1691
/*
 * Push queued frames from the per-AC tx_pending queues to the device,
 * honoring both the per-queue depth limit (tx_stats[].limit) and the
 * global AR9170_TX_MAX_PENDING budget.  Queues that hit their depth
 * limit are stopped in mac80211; frames the transport refuses are
 * dropped and their accounting rolled back.  When anything was sent,
 * the tx janitor is (re)scheduled to reap status/timeouts.
 */
static void ar9170_tx(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	struct ar9170_tx_info *arinfo;
	unsigned int i, frames, frames_failed, remaining_space;
	int err;
	bool schedule_garbagecollector = false;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	if (unlikely(!IS_STARTED(ar)))
		return ;

	remaining_space = AR9170_TX_MAX_PENDING;

	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
		spin_lock_irqsave(&ar->tx_stats_lock, flags);
		/* how many frames this queue may send right now */
		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
			     skb_queue_len(&ar->tx_pending[i]));

		if (remaining_space < frames) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
			       "remaining slots:%d, needed:%d\n",
			       wiphy_name(ar->hw->wiphy), i, remaining_space,
			       frames);
#endif /* AR9170_QUEUE_DEBUG */
			frames = remaining_space;
		}

		/* account optimistically; failures are rolled back below */
		ar->tx_stats[i].len += frames;
		ar->tx_stats[i].count += frames;
		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: queue %d full\n",
			       wiphy_name(ar->hw->wiphy), i);

			printk(KERN_DEBUG "%s: stuck frames: ===> \n",
			       wiphy_name(ar->hw->wiphy));
			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */

#ifdef AR9170_QUEUE_STOP_DEBUG
			printk(KERN_DEBUG "%s: stop queue %d\n",
			       wiphy_name(ar->hw->wiphy), i);
			__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
			ieee80211_stop_queue(ar->hw, i);
		}

		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);

		if (!frames)
			continue;

		frames_failed = 0;
		while (frames) {
			skb = skb_dequeue(&ar->tx_pending[i]);
			if (unlikely(!skb)) {
				frames_failed += frames;
				frames = 0;
				break;
			}

			info = IEEE80211_SKB_CB(skb);
			arinfo = (void *) info->rate_driver_data;

			/* TODO: cancel stuck frames */
			arinfo->timeout = jiffies +
					  msecs_to_jiffies(AR9170_TX_TIMEOUT);

			if (info->flags & IEEE80211_TX_CTL_AMPDU)
				atomic_inc(&ar->tx_ampdu_pending);

#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
			       wiphy_name(ar->hw->wiphy), i);
			ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */

			err = ar->tx(ar, skb);
			if (unlikely(err)) {
				/* transport refused the frame: drop it */
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_pending);

				frames_failed++;
				dev_kfree_skb_any(skb);
			} else {
				remaining_space--;
				schedule_garbagecollector = true;
			}

			frames--;
		}

#ifdef AR9170_QUEUE_DEBUG
		printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
		       wiphy_name(ar->hw->wiphy), i);

		printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
		       wiphy_name(ar->hw->wiphy));
		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
#endif /* AR9170_QUEUE_DEBUG */

		if (unlikely(frames_failed)) {
#ifdef AR9170_QUEUE_DEBUG
			printk(KERN_DEBUG "%s: frames failed %d =>\n",
			       wiphy_name(ar->hw->wiphy), frames_failed);
#endif /* AR9170_QUEUE_DEBUG */

			/* roll back the optimistic accounting from above */
			spin_lock_irqsave(&ar->tx_stats_lock, flags);
			ar->tx_stats[i].len -= frames_failed;
			ar->tx_stats[i].count -= frames_failed;
#ifdef AR9170_QUEUE_STOP_DEBUG
			printk(KERN_DEBUG "%s: wake queue %d\n",
			       wiphy_name(ar->hw->wiphy), i);
			__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
			ieee80211_wake_queue(ar->hw, i);
			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
		}
	}

	if (!schedule_garbagecollector)
		return;

	ieee80211_queue_delayed_work(ar->hw,
				     &ar->tx_janitor,
				     msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
1825
/*
 * Insert an outgoing A-MPDU frame into its TID reorder queue, keeping
 * that queue sorted by sequence number.  Frames for a session that is
 * not fully established, or whose sequence number falls outside the
 * BlockACK window, are dropped.
 *
 * Returns true when enough frames have accumulated that the caller
 * should run the aggregator (ar9170_tx_ampdu) immediately.
 */
static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo;
	struct ar9170_sta_info *sta_info;
	struct ar9170_sta_tid *agg;
	struct sk_buff *iter;
	unsigned long flags, f2;
	unsigned int max;
	u16 tid, seq, qseq;
	bool run = false, queue = false;

	tid = ar9170_get_tid(skb);
	seq = ar9170_get_seq(skb);
	txinfo = IEEE80211_SKB_CB(skb);
	sta_info = (void *) txinfo->control.sta->drv_priv;
	agg = &sta_info->agg[tid];
	max = sta_info->ampdu_max_len;

	spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);

	if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
		       "for ESS:%pM tid:%d state:%d.\n",
		       wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
		       agg->state);
#endif /* AR9170_TXAGG_DEBUG */
		goto err_unlock;
	}

	/* first frame of a fresh session: anchor the window and arm
	 * the TID for the aggregator list */
	if (!agg->active) {
		agg->active = true;
		agg->ssn = seq;
		queue = true;
	}

	/* check if seq is within the BA window */
	if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
		       "fit into BA window (%d - %d)\n",
		       wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
		       (agg->ssn + max) & 0xfff);
#endif /* AR9170_TXAGG_DEBUG */
		goto err_unlock;
	}

	spin_lock_irqsave(&agg->queue.lock, f2);

	/* walk backwards to find the in-order insertion point */
	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = ar9170_get_seq(iter);

		if (GET_NEXT_SEQ(qseq) == seq) {
			__skb_queue_after(&agg->queue, iter, skb);
			goto queued;
		}
	}

	/* lowest sequence number seen so far: becomes the new head */
	__skb_queue_head(&agg->queue, skb);

queued:
	spin_unlock_irqrestore(&agg->queue.lock, f2);

#ifdef AR9170_TXAGG_DEBUG
	printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
	       wiphy_name(ar->hw->wiphy), skb);
	__ar9170_dump_txqueue(ar, &agg->queue);
#endif /* AR9170_TXAGG_DEBUG */

	if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
		run = true;

	if (queue)
		list_add_tail(&agg->list, &ar->tx_ampdu_list);

	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
	return run;

err_unlock:
	spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
	dev_kfree_skb_irq(skb);
	return false;
}
1909
1910 int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1911 {
1912 struct ar9170 *ar = hw->priv;
1913 struct ieee80211_tx_info *info;
1914
1915 if (unlikely(!IS_STARTED(ar)))
1916 goto err_free;
1917
1918 if (unlikely(ar9170_tx_prepare(ar, skb)))
1919 goto err_free;
1920
1921 info = IEEE80211_SKB_CB(skb);
1922 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1923 bool run = ar9170_tx_ampdu_queue(ar, skb);
1924
1925 if (run || !atomic_read(&ar->tx_ampdu_pending))
1926 ar9170_tx_ampdu(ar);
1927 } else {
1928 unsigned int queue = skb_get_queue_mapping(skb);
1929
1930 ar9170_tx_prepare_phy(ar, skb);
1931 skb_queue_tail(&ar->tx_pending[queue], skb);
1932 }
1933
1934 ar9170_tx(ar);
1935 return NETDEV_TX_OK;
1936
1937 err_free:
1938 dev_kfree_skb_any(skb);
1939 return NETDEV_TX_OK;
1940 }
1941
1942 static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1943 struct ieee80211_vif *vif)
1944 {
1945 struct ar9170 *ar = hw->priv;
1946 struct ath_common *common = &ar->common;
1947 int err = 0;
1948
1949 mutex_lock(&ar->mutex);
1950
1951 if (ar->vif) {
1952 err = -EBUSY;
1953 goto unlock;
1954 }
1955
1956 ar->vif = vif;
1957 memcpy(common->macaddr, vif->addr, ETH_ALEN);
1958
1959 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1960 ar->rx_software_decryption = true;
1961 ar->disable_offload = true;
1962 }
1963
1964 ar->cur_filter = 0;
1965 err = ar9170_update_frame_filter(ar, AR9170_MAC_REG_FTF_DEFAULTS);
1966 if (err)
1967 goto unlock;
1968
1969 err = ar9170_set_operating_mode(ar);
1970
1971 unlock:
1972 mutex_unlock(&ar->mutex);
1973 return err;
1974 }
1975
/*
 * mac80211 remove_interface callback: detach the single vif and put
 * the device back into an idle, non-beaconing, non-sniffing state.
 */
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);
	ar->vif = NULL;
	ar9170_update_frame_filter(ar, 0);
	ar9170_set_beacon_timers(ar);
	/* release the cached beacon template */
	dev_kfree_skb(ar->beacon);
	ar->beacon = NULL;
	ar->sniffer_enabled = false;
	ar->rx_software_decryption = false;
	ar9170_set_operating_mode(ar);
	mutex_unlock(&ar->mutex);
}
1992
/*
 * mac80211 config callback.  Only the retry-limit and channel changes
 * are acted upon today; listen-interval, powersave and tx-power
 * changes are accepted but not yet implemented (see TODOs).
 */
static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
		/*
		 * is it long_frame_max_tx_count or short_frame_max_tx_count?
		 */

		err = ar9170_set_hwretry_limit(ar,
			ar->hw->conf.long_frame_max_tx_count);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {

		/* adjust slot time for 5 GHz */
		err = ar9170_set_slot_time(ar);
		if (err)
			goto out;

		err = ar9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = ar9170_set_channel(ar, hw->conf.channel,
				AR9170_RFI_NONE,
				nl80211_to_ar9170(hw->conf.channel_type));
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
2048
2049 static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
2050 struct dev_addr_list *mclist)
2051 {
2052 u64 mchash;
2053 int i;
2054
2055 /* always get broadcast frames */
2056 mchash = 1ULL << (0xff >> 2);
2057
2058 for (i = 0; i < mc_count; i++) {
2059 if (WARN_ON(!mclist))
2060 break;
2061 mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
2062 mclist = mclist->next;
2063 }
2064
2065 return mchash;
2066 }
2067
/*
 * mac80211 configure_filter callback: translate mac80211's FIF_*
 * filter flags into the device's multicast hash, frame-type filter
 * and sniffer mode.  Unsupported flags are masked out of *new_flags
 * so mac80211 knows what the hardware actually honors.
 */
static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed_flags,
				       unsigned int *new_flags,
				       u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	if (unlikely(!IS_ACCEPTING_CMD(ar)))
		return ;

	mutex_lock(&ar->mutex);

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
		      FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	/* only touch the hardware hash when it actually changed */
	if (multicast != ar->cur_mc_hash)
		ar9170_update_multicast(ar, multicast);

	if (changed_flags & FIF_CONTROL) {
		u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
			     AR9170_MAC_REG_FTF_RTS |
			     AR9170_MAC_REG_FTF_CTS |
			     AR9170_MAC_REG_FTF_ACK |
			     AR9170_MAC_REG_FTF_CFE |
			     AR9170_MAC_REG_FTF_CFE_ACK;

		/* add or remove the control-frame bits from the filter */
		if (*new_flags & FIF_CONTROL)
			filter |= ar->cur_filter;
		else
			filter &= (~ar->cur_filter);

		ar9170_update_frame_filter(ar, filter);
	}

	if (changed_flags & FIF_PROMISC_IN_BSS) {
		ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
		ar9170_set_operating_mode(ar);
	}

	mutex_unlock(&ar->mutex);
}
2118
2119
/*
 * mac80211 bss_info_changed callback: propagate BSS configuration
 * updates (BSSID, beaconing, association state, slot time, basic
 * rates) to the hardware.  Any failing step aborts the remaining
 * updates; the error itself is not reported back (void callback).
 */
static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;

	mutex_lock(&ar->mutex);

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = ar9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BEACON_ENABLED)
		ar->enable_beacon = bss_conf->enable_beacon;

	if (changed & BSS_CHANGED_BEACON) {
		/* upload the new beacon template */
		err = ar9170_update_beacon(ar);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {
		err = ar9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
#ifndef CONFIG_AR9170_LEDS
		/* enable assoc LED. */
		err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = ar9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = ar9170_set_basic_rates(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
}
2181
/*
 * Read the 64-bit hardware TSF.  The high word is sampled twice -
 * before and after the low word - and the read is retried (up to 10
 * times) until both samples agree, guarding against a low-to-high
 * carry in the middle of the multi-register read.  Returns 0 when the
 * register read itself failed.
 */
static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err;
	u64 tsf;
#define NR 3
	static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
				    AR9170_MAC_REG_TSF_L,
				    AR9170_MAC_REG_TSF_H };
	u32 val[NR];
	int loops = 0;

	mutex_lock(&ar->mutex);

	while (loops++ < 10) {
		err = ar9170_read_mreg(ar, NR, addr, val);
		if (err || val[0] == val[2])
			break;
	}

	mutex_unlock(&ar->mutex);

	if (WARN_ON(err))
		return 0;
	tsf = val[0];
	tsf = (tsf << 32) | val[1];
	return tsf;
#undef NR
}
2211
/*
 * mac80211 set_key callback: program or remove a hardware cipher key.
 * Pairwise keys occupy the first-free slot in 0-63 (tracked in the
 * ar->usedkeys bitmap); group keys live at 64 + keyidx.  TKIP needs a
 * second upload for its MIC key.  When no pairwise slot is free the
 * driver falls back to software decryption and returns -ENOSPC.
 */
static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if ((!ar->vif) || (ar->disable_offload))
		return -EOPNOTSUPP;

	/* map mac80211's cipher to the device's key-type code */
	switch (key->alg) {
	case ALG_WEP:
		if (key->keylen == WLAN_KEY_LEN_WEP40)
			ktype = AR9170_ENC_ALG_WEP64;
		else
			ktype = AR9170_ENC_ALG_WEP128;
		break;
	case ALG_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case ALG_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (unlikely(!IS_STARTED(ar))) {
			err = -EOPNOTSUPP;
			goto out;
		}

		/* group keys need all-zeroes address */
		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
			sta = NULL;

		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			/* find the first free pairwise key slot */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64) {
				/* no slot left: fall back to software rx */
				ar->rx_software_decryption = true;
				ar9170_set_operating_mode(ar);
				err = -ENOSPC;
				goto out;
			}
		} else {
			i = 64 + key->keyidx;
		}

		key->hw_key_idx = i;

		err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
					key->key, min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->alg == ALG_TKIP) {
			/* second upload: the TKIP MIC key */
			err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
						ktype, 1, key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating the MMIC
			 * for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (unlikely(!IS_STARTED(ar))) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		err = ar9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group keys are cleared by uploading a null key */
			err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
						AR9170_ENC_ALG_NONE, 0,
						NULL, 0);
			if (err)
				goto out;

			if (key->alg == ALG_TKIP) {
				err = ar9170_upload_key(ar, key->hw_key_idx,
							NULL,
							AR9170_ENC_ALG_NONE, 1,
							NULL, 0);
				if (err)
					goto out;
			}

		}
	}

	/* sync the roll-call bitmap so rx knows which keys are loaded */
	ar9170_regwrite_begin(ar);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
	ar9170_regwrite_finish();
	err = ar9170_regwrite_result();

out:
	mutex_unlock(&ar->mutex);

	return err;
}
2332
2333 static int ar9170_sta_add(struct ieee80211_hw *hw,
2334 struct ieee80211_vif *vif,
2335 struct ieee80211_sta *sta)
2336 {
2337 struct ar9170 *ar = hw->priv;
2338 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2339 unsigned int i;
2340
2341 memset(sta_info, 0, sizeof(*sta_info));
2342
2343 if (!sta->ht_cap.ht_supported)
2344 return 0;
2345
2346 if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
2347 ar->global_ampdu_density = sta->ht_cap.ampdu_density;
2348
2349 if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
2350 ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
2351
2352 for (i = 0; i < AR9170_NUM_TID; i++) {
2353 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
2354 sta_info->agg[i].active = false;
2355 sta_info->agg[i].ssn = 0;
2356 sta_info->agg[i].tid = i;
2357 INIT_LIST_HEAD(&sta_info->agg[i].list);
2358 skb_queue_head_init(&sta_info->agg[i].queue);
2359 }
2360
2361 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
2362
2363 return 0;
2364 }
2365
2366 static int ar9170_sta_remove(struct ieee80211_hw *hw,
2367 struct ieee80211_vif *vif,
2368 struct ieee80211_sta *sta)
2369 {
2370 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2371 unsigned int i;
2372
2373 if (!sta->ht_cap.ht_supported)
2374 return 0;
2375
2376 for (i = 0; i < AR9170_NUM_TID; i++) {
2377 sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
2378 skb_queue_purge(&sta_info->agg[i].queue);
2379 }
2380
2381 return 0;
2382 }
2383
2384 static int ar9170_get_stats(struct ieee80211_hw *hw,
2385 struct ieee80211_low_level_stats *stats)
2386 {
2387 struct ar9170 *ar = hw->priv;
2388 u32 val;
2389 int err;
2390
2391 mutex_lock(&ar->mutex);
2392 err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
2393 ar->stats.dot11ACKFailureCount += val;
2394
2395 memcpy(stats, &ar->stats, sizeof(*stats));
2396 mutex_unlock(&ar->mutex);
2397
2398 return 0;
2399 }
2400
2401 static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
2402 const struct ieee80211_tx_queue_params *param)
2403 {
2404 struct ar9170 *ar = hw->priv;
2405 int ret;
2406
2407 mutex_lock(&ar->mutex);
2408 if (queue < __AR9170_NUM_TXQ) {
2409 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
2410 param, sizeof(*param));
2411
2412 ret = ar9170_set_qos(ar);
2413 } else {
2414 ret = -EINVAL;
2415 }
2416
2417 mutex_unlock(&ar->mutex);
2418 return ret;
2419 }
2420
/*
 * mac80211 A-MPDU (aggregation) state machine hook.
 *
 * Only TX aggregation is driven from here; the RX side is handled by the
 * firmware.  Per-TID state lives in ar9170_sta_info and every transition
 * is made under tx_ampdu_list_lock (irqsave, as the TX path may run from
 * interrupt context — TODO confirm against the TX completion path).
 */
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       enum ieee80211_ampdu_mlme_action action,
			       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct ar9170 *ar = hw->priv;
	struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
	unsigned long flags;

	/* aggregation is opt-in via the "ht" module parameter */
	if (!modparam_ht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		/*
		 * refuse a (re)start while the TID is not fully shut down
		 * or is still linked on the active aggregation list
		 */
		if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
		    !list_empty(&tid_info->list)) {
			spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
#ifdef AR9170_TXAGG_DEBUG
			printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
			       "is in a very bad state!\n",
			       wiphy_name(hw->wiphy), sta->addr, tid);
#endif /* AR9170_TXAGG_DEBUG */
			return -EBUSY;
		}

		/* hand the starting sequence number back to mac80211 */
		*ssn = tid_info->ssn;
		tid_info->state = AR9170_TID_STATE_PROGRESS;
		tid_info->active = false;
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		/* tear down the session and drop any frames still queued */
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		tid_info->state = AR9170_TID_STATE_SHUTDOWN;
		list_del_init(&tid_info->list);
		tid_info->active = false;
		skb_queue_purge(&tid_info->queue);
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
#ifdef AR9170_TXAGG_DEBUG
		printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
		       wiphy_name(hw->wiphy), sta->addr, tid);
#endif /* AR9170_TXAGG_DEBUG */
		/* the ADDBA handshake completed; frames may now aggregate */
		spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
		sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
		spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by firmware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
2486
/*
 * mac80211 callback table for this driver; passed to
 * ieee80211_alloc_hw() in ar9170_alloc() below.
 */
static const struct ieee80211_ops ar9170_ops = {
	.start			= ar9170_op_start,
	.stop			= ar9170_op_stop,
	.tx			= ar9170_op_tx,
	.add_interface		= ar9170_op_add_interface,
	.remove_interface	= ar9170_op_remove_interface,
	.config			= ar9170_op_config,
	.prepare_multicast	= ar9170_op_prepare_multicast,
	.configure_filter	= ar9170_op_configure_filter,
	.conf_tx		= ar9170_conf_tx,
	.bss_info_changed	= ar9170_op_bss_info_changed,
	.get_tsf		= ar9170_op_get_tsf,
	.set_key		= ar9170_set_key,
	.sta_add		= ar9170_sta_add,
	.sta_remove		= ar9170_sta_remove,
	.get_stats		= ar9170_get_stats,
	.ampdu_action		= ar9170_ampdu_action,
};
2505
/*
 * Allocate and pre-initialize an ar9170 device instance.
 *
 * @priv_size: extra bytes the transport layer (USB, ...) wants appended
 *             to the mac80211 private area.
 *
 * Sets up locks, queues, work items and the first half of the wiphy
 * configuration (the second half happens after the EEPROM is read).
 * Returns the new struct ar9170 * on success or ERR_PTR(-ENOMEM).
 */
void *ar9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->cmdlock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	skb_queue_head_init(&ar->tx_status_ampdu);
	/* one status and one pending queue per hardware TX queue */
	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	ar9170_rx_reset_rx_mpdu(ar);
	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);

	/* all hw supports 2.4 GHz, so set channel to 1 by default */
	ar->channel = &ar9170_2ghz_chantable[0];

	/* first part of wiphy init */
	ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_WDS) |
					 BIT(NL80211_IFTYPE_ADHOC);
	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			 IEEE80211_HW_SIGNAL_DBM |
			 IEEE80211_HW_NOISE_DBM;

	if (modparam_ht) {
		ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	} else {
		/* without the "ht" module parameter, advertise no HT caps */
		ar9170_band_2GHz.ht_cap.ht_supported = false;
		ar9170_band_5GHz.ht_cap.ht_supported = false;
	}

	ar->hw->queues = __AR9170_NUM_TXQ;
	ar->hw->extra_tx_headroom = 8;
	ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);

	ar->hw->max_rates = 1;
	ar->hw->max_rate_tries = 3;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	return ar;

err_nomem:
	/* kfree_skb(NULL) is a no-op, so this path covers both failures */
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
2580
2581 static int ar9170_read_eeprom(struct ar9170 *ar)
2582 {
2583 #define RW 8 /* number of words to read at once */
2584 #define RB (sizeof(u32) * RW)
2585 struct ath_regulatory *regulatory = &ar->common.regulatory;
2586 u8 *eeprom = (void *)&ar->eeprom;
2587 u8 *addr = ar->eeprom.mac_address;
2588 __le32 offsets[RW];
2589 unsigned int rx_streams, tx_streams, tx_params = 0;
2590 int i, j, err, bands = 0;
2591
2592 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
2593
2594 BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
2595 #ifndef __CHECKER__
2596 /* don't want to handle trailing remains */
2597 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
2598 #endif
2599
2600 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
2601 for (j = 0; j < RW; j++)
2602 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
2603 RB * i + 4 * j);
2604
2605 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
2606 RB, (u8 *) &offsets,
2607 RB, eeprom + RB * i);
2608 if (err)
2609 return err;
2610 }
2611
2612 #undef RW
2613 #undef RB
2614
2615 if (ar->eeprom.length == cpu_to_le16(0xFFFF))
2616 return -ENODATA;
2617
2618 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
2619 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
2620 bands++;
2621 }
2622 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
2623 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
2624 bands++;
2625 }
2626
2627 rx_streams = hweight8(ar->eeprom.rx_mask);
2628 tx_streams = hweight8(ar->eeprom.tx_mask);
2629
2630 if (rx_streams != tx_streams)
2631 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
2632
2633 if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)
2634 tx_params = (tx_streams - 1) <<
2635 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2636
2637 ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
2638 ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
2639
2640 /*
2641 * I measured this, a bandswitch takes roughly
2642 * 135 ms and a frequency switch about 80.
2643 *
2644 * FIXME: measure these values again once EEPROM settings
2645 * are used, that will influence them!
2646 */
2647 if (bands == 2)
2648 ar->hw->channel_change_time = 135 * 1000;
2649 else
2650 ar->hw->channel_change_time = 80 * 1000;
2651
2652 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
2653 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
2654
2655 /* second part of wiphy init */
2656 SET_IEEE80211_PERM_ADDR(ar->hw, addr);
2657
2658 return bands ? 0 : -EINVAL;
2659 }
2660
2661 static int ar9170_reg_notifier(struct wiphy *wiphy,
2662 struct regulatory_request *request)
2663 {
2664 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
2665 struct ar9170 *ar = hw->priv;
2666
2667 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
2668 }
2669
2670 int ar9170_register(struct ar9170 *ar, struct device *pdev)
2671 {
2672 struct ath_regulatory *regulatory = &ar->common.regulatory;
2673 int err;
2674
2675 /* try to read EEPROM, init MAC addr */
2676 err = ar9170_read_eeprom(ar);
2677 if (err)
2678 goto err_out;
2679
2680 err = ath_regd_init(regulatory, ar->hw->wiphy,
2681 ar9170_reg_notifier);
2682 if (err)
2683 goto err_out;
2684
2685 err = ieee80211_register_hw(ar->hw);
2686 if (err)
2687 goto err_out;
2688
2689 if (!ath_is_world_regd(regulatory))
2690 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2691
2692 err = ar9170_init_leds(ar);
2693 if (err)
2694 goto err_unreg;
2695
2696 #ifdef CONFIG_AR9170_LEDS
2697 err = ar9170_register_leds(ar);
2698 if (err)
2699 goto err_unreg;
2700 #endif /* CONFIG_AR9170_LEDS */
2701
2702 dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
2703 wiphy_name(ar->hw->wiphy));
2704
2705 ar->registered = true;
2706 return 0;
2707
2708 err_unreg:
2709 ieee80211_unregister_hw(ar->hw);
2710
2711 err_out:
2712 return err;
2713 }
2714
2715 void ar9170_unregister(struct ar9170 *ar)
2716 {
2717 if (ar->registered) {
2718 #ifdef CONFIG_AR9170_LEDS
2719 ar9170_unregister_leds(ar);
2720 #endif /* CONFIG_AR9170_LEDS */
2721
2722 ieee80211_unregister_hw(ar->hw);
2723 }
2724
2725 kfree_skb(ar->rx_failover);
2726 mutex_destroy(&ar->mutex);
2727 }