drivers/net/wireless/ath/ath9k/main.c
1 /*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/nl80211.h>
18 #include "ath9k.h"
19 #include "btcoex.h"
20
21 static char *dev_info = "ath9k";
22
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
27
28 static int modparam_nohwcrypt;
29 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
30 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
31
32 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
33 module_param_named(debug, ath9k_debug, uint, 0);
34 MODULE_PARM_DESC(debug, "Debugging mask");
35
36 /* We use the hw_value as an index into our private channel structure */
37
38 #define CHAN2G(_freq, _idx) { \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 20, \
42 }
43
44 #define CHAN5G(_freq, _idx) { \
45 .band = IEEE80211_BAND_5GHZ, \
46 .center_freq = (_freq), \
47 .hw_value = (_idx), \
48 .max_power = 20, \
49 }
50
51 /* Some 2 GHz radios are actually tunable from 2312-2732 MHz
52 * in 5 MHz steps; we only list the channels we know we have
53 * calibration data for on all cards, so this table can stay
54 * static */
55 static struct ieee80211_channel ath9k_2ghz_chantable[] = {
56 CHAN2G(2412, 0), /* Channel 1 */
57 CHAN2G(2417, 1), /* Channel 2 */
58 CHAN2G(2422, 2), /* Channel 3 */
59 CHAN2G(2427, 3), /* Channel 4 */
60 CHAN2G(2432, 4), /* Channel 5 */
61 CHAN2G(2437, 5), /* Channel 6 */
62 CHAN2G(2442, 6), /* Channel 7 */
63 CHAN2G(2447, 7), /* Channel 8 */
64 CHAN2G(2452, 8), /* Channel 9 */
65 CHAN2G(2457, 9), /* Channel 10 */
66 CHAN2G(2462, 10), /* Channel 11 */
67 CHAN2G(2467, 11), /* Channel 12 */
68 CHAN2G(2472, 12), /* Channel 13 */
69 CHAN2G(2484, 13), /* Channel 14 */
70 };
71
72 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
73 * in 5 MHz steps; we only list the channels we know we have
74 * calibration data for on all cards, so this table can stay
75 * static */
76 static struct ieee80211_channel ath9k_5ghz_chantable[] = {
77 /* _We_ call this UNII 1 */
78 CHAN5G(5180, 14), /* Channel 36 */
79 CHAN5G(5200, 15), /* Channel 40 */
80 CHAN5G(5220, 16), /* Channel 44 */
81 CHAN5G(5240, 17), /* Channel 48 */
82 /* _We_ call this UNII 2 */
83 CHAN5G(5260, 18), /* Channel 52 */
84 CHAN5G(5280, 19), /* Channel 56 */
85 CHAN5G(5300, 20), /* Channel 60 */
86 CHAN5G(5320, 21), /* Channel 64 */
87 /* _We_ call this "Middle band" */
88 CHAN5G(5500, 22), /* Channel 100 */
89 CHAN5G(5520, 23), /* Channel 104 */
90 CHAN5G(5540, 24), /* Channel 108 */
91 CHAN5G(5560, 25), /* Channel 112 */
92 CHAN5G(5580, 26), /* Channel 116 */
93 CHAN5G(5600, 27), /* Channel 120 */
94 CHAN5G(5620, 28), /* Channel 124 */
95 CHAN5G(5640, 29), /* Channel 128 */
96 CHAN5G(5660, 30), /* Channel 132 */
97 CHAN5G(5680, 31), /* Channel 136 */
98 CHAN5G(5700, 32), /* Channel 140 */
99 /* _We_ call this UNII 3 */
100 CHAN5G(5745, 33), /* Channel 149 */
101 CHAN5G(5765, 34), /* Channel 153 */
102 CHAN5G(5785, 35), /* Channel 157 */
103 CHAN5G(5805, 36), /* Channel 161 */
104 CHAN5G(5825, 37), /* Channel 165 */
105 };
106
107 /* Atheros hardware rate code addition for short preamble */
108 #define SHPCHECK(__hw_rate, __flags) \
109 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
110
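/*
 * mac80211 transmits with hw_value for long preamble and, when non-zero,
 * hw_value_short for short preamble; SHPCHECK() above builds the latter
 * by setting the 0x04 bit for the CCK rates flagged as short-preamble
 * capable.
 */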
111 #define RATE(_bitrate, _hw_rate, _flags) { \
112 .bitrate = (_bitrate), \
113 .flags = (_flags), \
114 .hw_value = (_hw_rate), \
115 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
116 }
117
118 static struct ieee80211_rate ath9k_legacy_rates[] = {
119 RATE(10, 0x1b, 0),
120 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
121 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
122 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
123 RATE(60, 0x0b, 0),
124 RATE(90, 0x0f, 0),
125 RATE(120, 0x0a, 0),
126 RATE(180, 0x0e, 0),
127 RATE(240, 0x09, 0),
128 RATE(360, 0x0d, 0),
129 RATE(480, 0x08, 0),
130 RATE(540, 0x0c, 0),
131 };
132
133 static void ath_cache_conf_rate(struct ath_softc *sc,
134 struct ieee80211_conf *conf)
135 {
136 switch (conf->channel->band) {
137 case IEEE80211_BAND_2GHZ:
138 if (conf_is_ht20(conf))
139 sc->cur_rate_mode = ATH9K_MODE_11NG_HT20;
140 else if (conf_is_ht40_minus(conf))
141 sc->cur_rate_mode = ATH9K_MODE_11NG_HT40MINUS;
142 else if (conf_is_ht40_plus(conf))
143 sc->cur_rate_mode = ATH9K_MODE_11NG_HT40PLUS;
144 else
145 sc->cur_rate_mode = ATH9K_MODE_11G;
146 break;
147 case IEEE80211_BAND_5GHZ:
148 if (conf_is_ht20(conf))
149 sc->cur_rate_mode = ATH9K_MODE_11NA_HT20;
150 else if (conf_is_ht40_minus(conf))
151 sc->cur_rate_mode = ATH9K_MODE_11NA_HT40MINUS;
152 else if (conf_is_ht40_plus(conf))
153 sc->cur_rate_mode = ATH9K_MODE_11NA_HT40PLUS;
154 else
155 sc->cur_rate_mode = ATH9K_MODE_11A;
156 break;
157 default:
158 BUG_ON(1);
159 break;
160 }
161 }
162
163 static void ath_update_txpow(struct ath_softc *sc)
164 {
165 struct ath_hw *ah = sc->sc_ah;
166 u32 txpow;
167
168 if (sc->curtxpow != sc->config.txpowlimit) {
169 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
170 /* read back in case value is clamped */
171 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
172 sc->curtxpow = txpow;
173 }
174 }
175
176 static u8 parse_mpdudensity(u8 mpdudensity)
177 {
178 /*
179 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
180 * 0 for no restriction
181 * 1 for 1/4 us
182 * 2 for 1/2 us
183 * 3 for 1 us
184 * 4 for 2 us
185 * 5 for 4 us
186 * 6 for 8 us
187 * 7 for 16 us
188 */
189 switch (mpdudensity) {
190 case 0:
191 return 0;
192 case 1:
193 case 2:
194 case 3:
195 /* Our lower layer calculations limit our precision to
196 1 microsecond */
197 return 1;
198 case 4:
199 return 2;
200 case 5:
201 return 4;
202 case 6:
203 return 8;
204 case 7:
205 return 16;
206 default:
207 return 0;
208 }
209 }
210
211 static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
212 struct ieee80211_hw *hw)
213 {
214 struct ieee80211_channel *curchan = hw->conf.channel;
215 struct ath9k_channel *channel;
216 u8 chan_idx;
217
218 chan_idx = curchan->hw_value;
219 channel = &sc->sc_ah->channels[chan_idx];
220 ath9k_update_ichannel(sc, hw, channel);
221 return channel;
222 }
223
224 static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
225 {
226 unsigned long flags;
227 bool ret;
228
229 spin_lock_irqsave(&sc->sc_pm_lock, flags);
230 ret = ath9k_hw_setpower(sc->sc_ah, mode);
231 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
232
233 return ret;
234 }
235
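/*
 * Power-save reference counting: ath9k_ps_wakeup() forces the chip into
 * AWAKE and must be balanced by ath9k_ps_restore(), which lets the chip
 * drop back to network sleep once the last user is done and no
 * beacon/CAB/PS-Poll/TX-ACK wait flags are pending.
 */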
236 void ath9k_ps_wakeup(struct ath_softc *sc)
237 {
238 unsigned long flags;
239
240 spin_lock_irqsave(&sc->sc_pm_lock, flags);
241 if (++sc->ps_usecount != 1)
242 goto unlock;
243
244 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
245
246 unlock:
247 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
248 }
249
250 void ath9k_ps_restore(struct ath_softc *sc)
251 {
252 unsigned long flags;
253
254 spin_lock_irqsave(&sc->sc_pm_lock, flags);
255 if (--sc->ps_usecount != 0)
256 goto unlock;
257
258 if (sc->ps_enabled &&
259 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
260 SC_OP_WAIT_FOR_CAB |
261 SC_OP_WAIT_FOR_PSPOLL_DATA |
262 SC_OP_WAIT_FOR_TX_ACK)))
263 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
264
265 unlock:
266 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
267 }
268
269 /*
270 * Set/change channels. If the channel is really being changed, it's done
271 * by resetting the chip. To accomplish this we must first clean up any pending
272 * DMA, then restart stuff.
273 */
274 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
275 struct ath9k_channel *hchan)
276 {
277 struct ath_hw *ah = sc->sc_ah;
278 struct ath_common *common = ath9k_hw_common(ah);
279 struct ieee80211_conf *conf = &common->hw->conf;
280 bool fastcc = true, stopped;
281 struct ieee80211_channel *channel = hw->conf.channel;
282 int r;
283
284 if (sc->sc_flags & SC_OP_INVALID)
285 return -EIO;
286
287 ath9k_ps_wakeup(sc);
288
289 /*
290 * This is only performed if the channel settings have
291 * actually changed.
292 *
293 * To switch channels clear any pending DMA operations;
294 * wait long enough for the RX fifo to drain, reset the
295 * hardware at the new frequency, and then re-enable
296 * the relevant bits of the h/w.
297 */
298 ath9k_hw_set_interrupts(ah, 0);
299 ath_drain_all_txq(sc, false);
300 stopped = ath_stoprecv(sc);
301
302 /* XXX: do not flush receive queue here. We don't want
303 * to flush data frames already in queue because of
304 * changing channel. */
305
306 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
307 fastcc = false;
308
309 ath_print(common, ATH_DBG_CONFIG,
310 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n",
311 sc->sc_ah->curchan->channel,
312 channel->center_freq, conf_is_ht40(conf));
313
314 spin_lock_bh(&sc->sc_resetlock);
315
316 r = ath9k_hw_reset(ah, hchan, fastcc);
317 if (r) {
318 ath_print(common, ATH_DBG_FATAL,
319 "Unable to reset channel (%u Mhz) "
320 "reset status %d\n",
321 channel->center_freq, r);
322 spin_unlock_bh(&sc->sc_resetlock);
323 goto ps_restore;
324 }
325 spin_unlock_bh(&sc->sc_resetlock);
326
327 sc->sc_flags &= ~SC_OP_FULL_RESET;
328
329 if (ath_startrecv(sc) != 0) {
330 ath_print(common, ATH_DBG_FATAL,
331 "Unable to restart recv logic\n");
332 r = -EIO;
333 goto ps_restore;
334 }
335
336 ath_cache_conf_rate(sc, &hw->conf);
337 ath_update_txpow(sc);
338 ath9k_hw_set_interrupts(ah, sc->imask);
339
340 ps_restore:
341 ath9k_ps_restore(sc);
342 return r;
343 }
344
345 /*
346 * This routine performs the periodic noise floor calibration function
347 * that is used to adjust and optimize the chip performance. This
348 * takes environmental changes (location, temperature) into account.
349 * When the task is complete, it reschedules itself depending on the
350 * appropriate interval that was calculated.
351 */
352 static void ath_ani_calibrate(unsigned long data)
353 {
354 struct ath_softc *sc = (struct ath_softc *)data;
355 struct ath_hw *ah = sc->sc_ah;
356 struct ath_common *common = ath9k_hw_common(ah);
357 bool longcal = false;
358 bool shortcal = false;
359 bool aniflag = false;
360 unsigned int timestamp = jiffies_to_msecs(jiffies);
361 u32 cal_interval, short_cal_interval;
362
363 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
364 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
365
366 /*
367 * don't calibrate when we're scanning.
368 * we are most likely not on our home channel.
369 */
370 spin_lock(&sc->ani_lock);
371 if (sc->sc_flags & SC_OP_SCANNING)
372 goto set_timer;
373
374 /* Only calibrate if awake */
375 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
376 goto set_timer;
377
378 ath9k_ps_wakeup(sc);
379
380 /* Long calibration runs independently of short calibration. */
381 if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
382 longcal = true;
383 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
384 common->ani.longcal_timer = timestamp;
385 }
386
387 /* Short calibration applies only while caldone is false */
388 if (!common->ani.caldone) {
389 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
390 shortcal = true;
391 ath_print(common, ATH_DBG_ANI,
392 "shortcal @%lu\n", jiffies);
393 common->ani.shortcal_timer = timestamp;
394 common->ani.resetcal_timer = timestamp;
395 }
396 } else {
397 if ((timestamp - common->ani.resetcal_timer) >=
398 ATH_RESTART_CALINTERVAL) {
399 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
400 if (common->ani.caldone)
401 common->ani.resetcal_timer = timestamp;
402 }
403 }
404
405 /* Verify whether we must check ANI */
406 if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
407 aniflag = true;
408 common->ani.checkani_timer = timestamp;
409 }
410
411 /* Skip all processing if there's nothing to do. */
412 if (longcal || shortcal || aniflag) {
413 /* Call ANI routine if necessary */
414 if (aniflag)
415 ath9k_hw_ani_monitor(ah, ah->curchan);
416
417 /* Perform calibration if necessary */
418 if (longcal || shortcal) {
419 common->ani.caldone =
420 ath9k_hw_calibrate(ah,
421 ah->curchan,
422 common->rx_chainmask,
423 longcal);
424
425 if (longcal)
426 common->ani.noise_floor = ath9k_hw_getchan_noise(ah,
427 ah->curchan);
428
429 ath_print(common, ATH_DBG_ANI,
430 " calibrate chan %u/%x nf: %d\n",
431 ah->curchan->channel,
432 ah->curchan->channelFlags,
433 common->ani.noise_floor);
434 }
435 }
436
437 ath9k_ps_restore(sc);
438
439 set_timer:
440 spin_unlock(&sc->ani_lock);
441 /*
442 * Set timer interval based on previous results.
443 * The interval must be the shortest necessary to satisfy ANI,
444 * short calibration and long calibration.
445 */
446 cal_interval = ATH_LONG_CALINTERVAL;
447 if (sc->sc_ah->config.enable_ani)
448 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
449 if (!common->ani.caldone)
450 cal_interval = min(cal_interval, (u32)short_cal_interval);
451
452 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
453 }
454
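/*
 * Prime the long/short/ANI calibration timestamps and arm the periodic
 * timer so ath_ani_calibrate() runs after one ANI poll interval.
 */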
455 static void ath_start_ani(struct ath_common *common)
456 {
457 unsigned long timestamp = jiffies_to_msecs(jiffies);
458
459 common->ani.longcal_timer = timestamp;
460 common->ani.shortcal_timer = timestamp;
461 common->ani.checkani_timer = timestamp;
462
463 mod_timer(&common->ani.timer,
464 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
465 }
466
467 /*
468 * Update tx/rx chainmask. For legacy association,
469 * hard code chainmask to 1x1, for 11n association, use
470 * the chainmask configuration, for bt coexistence, use
471 * the chainmask configuration even in legacy mode.
472 */
473 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
474 {
475 struct ath_hw *ah = sc->sc_ah;
476 struct ath_common *common = ath9k_hw_common(ah);
477
478 if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
479 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
480 common->tx_chainmask = ah->caps.tx_chainmask;
481 common->rx_chainmask = ah->caps.rx_chainmask;
482 } else {
483 common->tx_chainmask = 1;
484 common->rx_chainmask = 1;
485 }
486
487 ath_print(common, ATH_DBG_CONFIG,
488 "tx chmask: %d, rx chmask: %d\n",
489 common->tx_chainmask,
490 common->rx_chainmask);
491 }
492
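/*
 * Per-station setup: when TX aggregation is enabled, initialize the
 * node's TX aggregation state and cache the peer's maximum A-MPDU length
 * and minimum MPDU start spacing from its HT capabilities.
 */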
493 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
494 {
495 struct ath_node *an;
496
497 an = (struct ath_node *)sta->drv_priv;
498
499 if (sc->sc_flags & SC_OP_TXAGGR) {
500 ath_tx_node_init(sc, an);
501 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
502 sta->ht_cap.ampdu_factor);
503 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
504 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
505 }
506 }
507
508 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
509 {
510 struct ath_node *an = (struct ath_node *)sta->drv_priv;
511
512 if (sc->sc_flags & SC_OP_TXAGGR)
513 ath_tx_node_cleanup(sc, an);
514 }
515
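/*
 * Bottom half for ath_isr(): services the interrupt causes cached in
 * sc->intrstatus (fatal errors trigger a full reset; RX, TX, TSFOOR and
 * generic timer events are handled in turn) and then re-enables the
 * hardware interrupt mask.
 */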
516 static void ath9k_tasklet(unsigned long data)
517 {
518 struct ath_softc *sc = (struct ath_softc *)data;
519 struct ath_hw *ah = sc->sc_ah;
520 struct ath_common *common = ath9k_hw_common(ah);
521
522 u32 status = sc->intrstatus;
523
524 ath9k_ps_wakeup(sc);
525
526 if (status & ATH9K_INT_FATAL) {
527 ath_reset(sc, false);
528 ath9k_ps_restore(sc);
529 return;
530 }
531
532 if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
533 spin_lock_bh(&sc->rx.rxflushlock);
534 ath_rx_tasklet(sc, 0);
535 spin_unlock_bh(&sc->rx.rxflushlock);
536 }
537
538 if (status & ATH9K_INT_TX)
539 ath_tx_tasklet(sc);
540
541 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
542 /*
543 * TSF sync does not look correct; remain awake to sync with
544 * the next Beacon.
545 */
546 ath_print(common, ATH_DBG_PS,
547 "TSFOOR - Sync with next Beacon\n");
548 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
549 }
550
551 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
552 if (status & ATH9K_INT_GENTIMER)
553 ath_gen_timer_isr(sc->sc_ah);
554
555 /* re-enable hardware interrupt */
556 ath9k_hw_set_interrupts(ah, sc->imask);
557 ath9k_ps_restore(sc);
558 }
559
560 irqreturn_t ath_isr(int irq, void *dev)
561 {
562 #define SCHED_INTR ( \
563 ATH9K_INT_FATAL | \
564 ATH9K_INT_RXORN | \
565 ATH9K_INT_RXEOL | \
566 ATH9K_INT_RX | \
567 ATH9K_INT_TX | \
568 ATH9K_INT_BMISS | \
569 ATH9K_INT_CST | \
570 ATH9K_INT_TSFOOR | \
571 ATH9K_INT_GENTIMER)
572
573 struct ath_softc *sc = dev;
574 struct ath_hw *ah = sc->sc_ah;
575 enum ath9k_int status;
576 bool sched = false;
577
578 /*
579 * The hardware is not ready/present, don't
580 * touch anything. Note this can happen early
581 * on if the IRQ is shared.
582 */
583 if (sc->sc_flags & SC_OP_INVALID)
584 return IRQ_NONE;
585
586
587 /* shared irq, not for us */
588
589 if (!ath9k_hw_intrpend(ah))
590 return IRQ_NONE;
591
592 /*
593 * Figure out the reason(s) for the interrupt. Note
594 * that the hal returns a pseudo-ISR that may include
595 * bits we haven't explicitly enabled so we mask the
596 * value to ensure we only process bits we requested.
597 */
598 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
599 status &= sc->imask; /* discard unasked-for bits */
600
601 /*
602 * If there are no status bits set, then this interrupt was not
603 * for me (should have been caught above).
604 */
605 if (!status)
606 return IRQ_NONE;
607
608 /* Cache the status */
609 sc->intrstatus = status;
610
611 if (status & SCHED_INTR)
612 sched = true;
613
614 /*
615 * If a FATAL or RXORN interrupt is received, we have to reset the
616 * chip immediately.
617 */
618 if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
619 goto chip_reset;
620
621 if (status & ATH9K_INT_SWBA)
622 tasklet_schedule(&sc->bcon_tasklet);
623
624 if (status & ATH9K_INT_TXURN)
625 ath9k_hw_updatetxtriglevel(ah, true);
626
627 if (status & ATH9K_INT_MIB) {
628 /*
629 * Disable interrupts until we service the MIB
630 * interrupt; otherwise it will continue to
631 * fire.
632 */
633 ath9k_hw_set_interrupts(ah, 0);
634 /*
635 * Let the hal handle the event. We assume
636 * it will clear whatever condition caused
637 * the interrupt.
638 */
639 ath9k_hw_procmibevent(ah);
640 ath9k_hw_set_interrupts(ah, sc->imask);
641 }
642
643 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
644 if (status & ATH9K_INT_TIM_TIMER) {
645 /* Clear RxAbort bit so that we can
646 * receive frames */
647 ath9k_setpower(sc, ATH9K_PM_AWAKE);
648 ath9k_hw_setrxabort(sc->sc_ah, 0);
649 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
650 }
651
652 chip_reset:
653
654 ath_debug_stat_interrupt(sc, status);
655
656 if (sched) {
657 /* turn off every interrupt except SWBA */
658 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
659 tasklet_schedule(&sc->intr_tq);
660 }
661
662 return IRQ_HANDLED;
663
664 #undef SCHED_INTR
665 }
666
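/*
 * Map a mac80211 channel type (no-HT/HT20/HT40+/HT40-) on the given band
 * to the corresponding ath9k CHANNEL_* hardware channel mode.
 */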
667 static u32 ath_get_extchanmode(struct ath_softc *sc,
668 struct ieee80211_channel *chan,
669 enum nl80211_channel_type channel_type)
670 {
671 u32 chanmode = 0;
672
673 switch (chan->band) {
674 case IEEE80211_BAND_2GHZ:
675 switch(channel_type) {
676 case NL80211_CHAN_NO_HT:
677 case NL80211_CHAN_HT20:
678 chanmode = CHANNEL_G_HT20;
679 break;
680 case NL80211_CHAN_HT40PLUS:
681 chanmode = CHANNEL_G_HT40PLUS;
682 break;
683 case NL80211_CHAN_HT40MINUS:
684 chanmode = CHANNEL_G_HT40MINUS;
685 break;
686 }
687 break;
688 case IEEE80211_BAND_5GHZ:
689 switch(channel_type) {
690 case NL80211_CHAN_NO_HT:
691 case NL80211_CHAN_HT20:
692 chanmode = CHANNEL_A_HT20;
693 break;
694 case NL80211_CHAN_HT40PLUS:
695 chanmode = CHANNEL_A_HT40PLUS;
696 break;
697 case NL80211_CHAN_HT40MINUS:
698 chanmode = CHANNEL_A_HT40MINUS;
699 break;
700 }
701 break;
702 default:
703 break;
704 }
705
706 return chanmode;
707 }
708
709 static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
710 struct ath9k_keyval *hk, const u8 *addr,
711 bool authenticator)
712 {
713 struct ath_hw *ah = common->ah;
714 const u8 *key_rxmic;
715 const u8 *key_txmic;
716
717 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
718 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
719
720 if (addr == NULL) {
721 /*
722 * Group key installation - only two key cache entries are used
723 * regardless of splitmic capability since group key is only
724 * used either for TX or RX.
725 */
726 if (authenticator) {
727 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
728 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
729 } else {
730 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
731 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
732 }
733 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
734 }
735 if (!common->splitmic) {
736 /* TX and RX keys share the same key cache entry. */
737 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
738 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
739 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
740 }
741
742 /* Separate key cache entries for TX and RX */
743
744 /* TX key goes at first index, RX key at +32. */
745 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
746 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
747 /* TX MIC entry failed. No need to proceed further */
748 ath_print(common, ATH_DBG_FATAL,
749 "Setting TX MIC Key Failed\n");
750 return 0;
751 }
752
753 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
754 /* XXX delete tx key on failure? */
755 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
756 }
757
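/*
 * TKIP consumes paired key cache entries: index i for the key and
 * i + 64 for the MIC, plus i + 32 and i + 64 + 32 when splitmic is in
 * use. A slot is only usable for a new TKIP key if all of its companion
 * indices are still free in the keymap.
 */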
758 static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
759 {
760 int i;
761
762 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
763 if (test_bit(i, common->keymap) ||
764 test_bit(i + 64, common->keymap))
765 continue; /* At least one part of TKIP key allocated */
766 if (common->splitmic &&
767 (test_bit(i + 32, common->keymap) ||
768 test_bit(i + 64 + 32, common->keymap)))
769 continue; /* At least one part of TKIP key allocated */
770
771 /* Found a free slot for a TKIP key */
772 return i;
773 }
774 return -1;
775 }
776
777 static int ath_reserve_key_cache_slot(struct ath_common *common)
778 {
779 int i;
780
781 /* First, try to find slots that would not be available for TKIP. */
782 if (common->splitmic) {
783 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
784 if (!test_bit(i, common->keymap) &&
785 (test_bit(i + 32, common->keymap) ||
786 test_bit(i + 64, common->keymap) ||
787 test_bit(i + 64 + 32, common->keymap)))
788 return i;
789 if (!test_bit(i + 32, common->keymap) &&
790 (test_bit(i, common->keymap) ||
791 test_bit(i + 64, common->keymap) ||
792 test_bit(i + 64 + 32, common->keymap)))
793 return i + 32;
794 if (!test_bit(i + 64, common->keymap) &&
795 (test_bit(i , common->keymap) ||
796 test_bit(i + 32, common->keymap) ||
797 test_bit(i + 64 + 32, common->keymap)))
798 return i + 64;
799 if (!test_bit(i + 64 + 32, common->keymap) &&
800 (test_bit(i, common->keymap) ||
801 test_bit(i + 32, common->keymap) ||
802 test_bit(i + 64, common->keymap)))
803 return i + 64 + 32;
804 }
805 } else {
806 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
807 if (!test_bit(i, common->keymap) &&
808 test_bit(i + 64, common->keymap))
809 return i;
810 if (test_bit(i, common->keymap) &&
811 !test_bit(i + 64, common->keymap))
812 return i + 64;
813 }
814 }
815
816 /* No partially used TKIP slots, pick any available slot */
817 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
818 /* Do not allow slots that could be needed for TKIP group keys
819 * to be used. This limitation could be removed if we know that
820 * TKIP will not be used. */
821 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
822 continue;
823 if (common->splitmic) {
824 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
825 continue;
826 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
827 continue;
828 }
829
830 if (!test_bit(i, common->keymap))
831 return i; /* Found a free slot for a key */
832 }
833
834 /* No free slot found */
835 return -1;
836 }
837
838 static int ath_key_config(struct ath_common *common,
839 struct ieee80211_vif *vif,
840 struct ieee80211_sta *sta,
841 struct ieee80211_key_conf *key)
842 {
843 struct ath_hw *ah = common->ah;
844 struct ath9k_keyval hk;
845 const u8 *mac = NULL;
846 int ret = 0;
847 int idx;
848
849 memset(&hk, 0, sizeof(hk));
850
851 switch (key->alg) {
852 case ALG_WEP:
853 hk.kv_type = ATH9K_CIPHER_WEP;
854 break;
855 case ALG_TKIP:
856 hk.kv_type = ATH9K_CIPHER_TKIP;
857 break;
858 case ALG_CCMP:
859 hk.kv_type = ATH9K_CIPHER_AES_CCM;
860 break;
861 default:
862 return -EOPNOTSUPP;
863 }
864
865 hk.kv_len = key->keylen;
866 memcpy(hk.kv_val, key->key, key->keylen);
867
868 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
869 /* For now, use the default keys for broadcast keys. This may
870 * need to change with virtual interfaces. */
871 idx = key->keyidx;
872 } else if (key->keyidx) {
873 if (WARN_ON(!sta))
874 return -EOPNOTSUPP;
875 mac = sta->addr;
876
877 if (vif->type != NL80211_IFTYPE_AP) {
878 /* Only keyidx 0 should be used with unicast key, but
879 * allow this for client mode for now. */
880 idx = key->keyidx;
881 } else
882 return -EIO;
883 } else {
884 if (WARN_ON(!sta))
885 return -EOPNOTSUPP;
886 mac = sta->addr;
887
888 if (key->alg == ALG_TKIP)
889 idx = ath_reserve_key_cache_slot_tkip(common);
890 else
891 idx = ath_reserve_key_cache_slot(common);
892 if (idx < 0)
893 return -ENOSPC; /* no free key cache entries */
894 }
895
896 if (key->alg == ALG_TKIP)
897 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
898 vif->type == NL80211_IFTYPE_AP);
899 else
900 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
901
902 if (!ret)
903 return -EIO;
904
905 set_bit(idx, common->keymap);
906 if (key->alg == ALG_TKIP) {
907 set_bit(idx + 64, common->keymap);
908 if (common->splitmic) {
909 set_bit(idx + 32, common->keymap);
910 set_bit(idx + 64 + 32, common->keymap);
911 }
912 }
913
914 return idx;
915 }
916
917 static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
918 {
919 struct ath_hw *ah = common->ah;
920
921 ath9k_hw_keyreset(ah, key->hw_key_idx);
922 if (key->hw_key_idx < IEEE80211_WEP_NKID)
923 return;
924
925 clear_bit(key->hw_key_idx, common->keymap);
926 if (key->alg != ALG_TKIP)
927 return;
928
929 clear_bit(key->hw_key_idx + 64, common->keymap);
930 if (common->splitmic) {
931 clear_bit(key->hw_key_idx + 32, common->keymap);
932 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
933 }
934 }
935
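/*
 * Advertise HT capabilities (40 MHz, SM power save, SGI-40, DSSS/CCK-40)
 * and build the supported MCS set from the configured TX/RX chainmasks.
 */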
936 static void setup_ht_cap(struct ath_softc *sc,
937 struct ieee80211_sta_ht_cap *ht_info)
938 {
939 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
940 u8 tx_streams, rx_streams;
941
942 ht_info->ht_supported = true;
943 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
944 IEEE80211_HT_CAP_SM_PS |
945 IEEE80211_HT_CAP_SGI_40 |
946 IEEE80211_HT_CAP_DSSSCCK40;
947
948 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
949 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
950
951 /* set up supported mcs set */
952 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
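/* A chainmask with at most one bit set (mask & (mask - 1) == 0) means a
 * single spatial stream; otherwise assume two streams. */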
953 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
954 1 : 2;
955 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
956 1 : 2;
957
958 if (tx_streams != rx_streams) {
959 ath_print(common, ATH_DBG_CONFIG,
960 "TX streams %d, RX streams: %d\n",
961 tx_streams, rx_streams);
962 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
963 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
964 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
965 }
966
967 ht_info->mcs.rx_mask[0] = 0xff;
968 if (rx_streams >= 2)
969 ht_info->mcs.rx_mask[1] = 0xff;
970
971 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
972 }
973
974 static void ath9k_bss_assoc_info(struct ath_softc *sc,
975 struct ieee80211_vif *vif,
976 struct ieee80211_bss_conf *bss_conf)
977 {
978 struct ath_hw *ah = sc->sc_ah;
979 struct ath_common *common = ath9k_hw_common(ah);
980
981 if (bss_conf->assoc) {
982 ath_print(common, ATH_DBG_CONFIG,
983 "Bss Info ASSOC %d, bssid: %pM\n",
984 bss_conf->aid, common->curbssid);
985
986 /* New association, store aid */
987 common->curaid = bss_conf->aid;
988 ath9k_hw_write_associd(ah);
989
990 /*
991 * Request a re-configuration of Beacon related timers
992 * on the receipt of the first Beacon frame (i.e.,
993 * after time sync with the AP).
994 */
995 sc->sc_flags |= SC_OP_BEACON_SYNC;
996
997 /* Configure the beacon */
998 ath_beacon_config(sc, vif);
999
1000 /* Reset rssi stats */
1001 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1002
1003 ath_start_ani(common);
1004 } else {
1005 ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
1006 common->curaid = 0;
1007 /* Stop ANI */
1008 del_timer_sync(&common->ani.timer);
1009 }
1010 }
1011
1012 /********************************/
1013 /* LED functions */
1014 /********************************/
1015
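/*
 * Periodic LED blink worker: while associated, toggles the LED GPIO and
 * reschedules itself, deriving the next on/off durations from the TX/RX
 * activity counters accumulated since the last run.
 */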
1016 static void ath_led_blink_work(struct work_struct *work)
1017 {
1018 struct ath_softc *sc = container_of(work, struct ath_softc,
1019 ath_led_blink_work.work);
1020
1021 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
1022 return;
1023
1024 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
1025 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
1026 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1027 else
1028 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1029 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
1030
1031 ieee80211_queue_delayed_work(sc->hw,
1032 &sc->ath_led_blink_work,
1033 (sc->sc_flags & SC_OP_LED_ON) ?
1034 msecs_to_jiffies(sc->led_off_duration) :
1035 msecs_to_jiffies(sc->led_on_duration));
1036
1037 sc->led_on_duration = sc->led_on_cnt ?
1038 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
1039 ATH_LED_ON_DURATION_IDLE;
1040 sc->led_off_duration = sc->led_off_cnt ?
1041 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
1042 ATH_LED_OFF_DURATION_IDLE;
1043 sc->led_on_cnt = sc->led_off_cnt = 0;
1044 if (sc->sc_flags & SC_OP_LED_ON)
1045 sc->sc_flags &= ~SC_OP_LED_ON;
1046 else
1047 sc->sc_flags |= SC_OP_LED_ON;
1048 }
1049
1050 static void ath_led_brightness(struct led_classdev *led_cdev,
1051 enum led_brightness brightness)
1052 {
1053 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
1054 struct ath_softc *sc = led->sc;
1055
1056 switch (brightness) {
1057 case LED_OFF:
1058 if (led->led_type == ATH_LED_ASSOC ||
1059 led->led_type == ATH_LED_RADIO) {
1060 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1061 (led->led_type == ATH_LED_RADIO));
1062 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1063 if (led->led_type == ATH_LED_RADIO)
1064 sc->sc_flags &= ~SC_OP_LED_ON;
1065 } else {
1066 sc->led_off_cnt++;
1067 }
1068 break;
1069 case LED_FULL:
1070 if (led->led_type == ATH_LED_ASSOC) {
1071 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
1072 ieee80211_queue_delayed_work(sc->hw,
1073 &sc->ath_led_blink_work, 0);
1074 } else if (led->led_type == ATH_LED_RADIO) {
1075 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1076 sc->sc_flags |= SC_OP_LED_ON;
1077 } else {
1078 sc->led_on_cnt++;
1079 }
1080 break;
1081 default:
1082 break;
1083 }
1084 }
1085
1086 static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1087 char *trigger)
1088 {
1089 int ret;
1090
1091 led->sc = sc;
1092 led->led_cdev.name = led->name;
1093 led->led_cdev.default_trigger = trigger;
1094 led->led_cdev.brightness_set = ath_led_brightness;
1095
1096 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1097 if (ret)
1098 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1099 "Failed to register led:%s", led->name);
1100 else
1101 led->registered = 1;
1102 return ret;
1103 }
1104
1105 static void ath_unregister_led(struct ath_led *led)
1106 {
1107 if (led->registered) {
1108 led_classdev_unregister(&led->led_cdev);
1109 led->registered = 0;
1110 }
1111 }
1112
1113 static void ath_deinit_leds(struct ath_softc *sc)
1114 {
1115 ath_unregister_led(&sc->assoc_led);
1116 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1117 ath_unregister_led(&sc->tx_led);
1118 ath_unregister_led(&sc->rx_led);
1119 ath_unregister_led(&sc->radio_led);
1120 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1121 }
1122
1123 static void ath_init_leds(struct ath_softc *sc)
1124 {
1125 char *trigger;
1126 int ret;
1127
1128 if (AR_SREV_9287(sc->sc_ah))
1129 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
1130 else
1131 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
1132
1133 /* Configure the LED GPIO pin for output */
1134 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
1135 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1136 /* LED off, active low */
1137 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1138
1139 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1140
1141 trigger = ieee80211_get_radio_led_name(sc->hw);
1142 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1143 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1144 ret = ath_register_led(sc, &sc->radio_led, trigger);
1145 sc->radio_led.led_type = ATH_LED_RADIO;
1146 if (ret)
1147 goto fail;
1148
1149 trigger = ieee80211_get_assoc_led_name(sc->hw);
1150 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1151 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1152 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1153 sc->assoc_led.led_type = ATH_LED_ASSOC;
1154 if (ret)
1155 goto fail;
1156
1157 trigger = ieee80211_get_tx_led_name(sc->hw);
1158 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1159 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1160 ret = ath_register_led(sc, &sc->tx_led, trigger);
1161 sc->tx_led.led_type = ATH_LED_TX;
1162 if (ret)
1163 goto fail;
1164
1165 trigger = ieee80211_get_rx_led_name(sc->hw);
1166 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1167 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1168 ret = ath_register_led(sc, &sc->rx_led, trigger);
1169 sc->rx_led.led_type = ATH_LED_RX;
1170 if (ret)
1171 goto fail;
1172
1173 return;
1174
1175 fail:
1176 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1177 ath_deinit_leds(sc);
1178 }
1179
1180 void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
1181 {
1182 struct ath_hw *ah = sc->sc_ah;
1183 struct ath_common *common = ath9k_hw_common(ah);
1184 struct ieee80211_channel *channel = hw->conf.channel;
1185 int r;
1186
1187 ath9k_ps_wakeup(sc);
1188 ath9k_hw_configpcipowersave(ah, 0, 0);
1189
1190 if (!ah->curchan)
1191 ah->curchan = ath_get_curchannel(sc, sc->hw);
1192
1193 spin_lock_bh(&sc->sc_resetlock);
1194 r = ath9k_hw_reset(ah, ah->curchan, false);
1195 if (r) {
1196 ath_print(common, ATH_DBG_FATAL,
1197 "Unable to reset channel %u (%uMhz) ",
1198 "reset status %d\n",
1199 channel->center_freq, r);
1200 }
1201 spin_unlock_bh(&sc->sc_resetlock);
1202
1203 ath_update_txpow(sc);
1204 if (ath_startrecv(sc) != 0) {
1205 ath_print(common, ATH_DBG_FATAL,
1206 "Unable to restart recv logic\n");
1207 return;
1208 }
1209
1210 if (sc->sc_flags & SC_OP_BEACONS)
1211 ath_beacon_config(sc, NULL); /* restart beacons */
1212
1213 /* Re-Enable interrupts */
1214 ath9k_hw_set_interrupts(ah, sc->imask);
1215
1216 /* Enable LED */
1217 ath9k_hw_cfg_output(ah, ah->led_pin,
1218 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1219 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
1220
1221 ieee80211_wake_queues(hw);
1222 ath9k_ps_restore(sc);
1223 }
1224
1225 void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
1226 {
1227 struct ath_hw *ah = sc->sc_ah;
1228 struct ieee80211_channel *channel = hw->conf.channel;
1229 int r;
1230
1231 ath9k_ps_wakeup(sc);
1232 ieee80211_stop_queues(hw);
1233
1234 /* Disable LED */
1235 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
1236 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
1237
1238 /* Disable interrupts */
1239 ath9k_hw_set_interrupts(ah, 0);
1240
1241 ath_drain_all_txq(sc, false); /* clear pending tx frames */
1242 ath_stoprecv(sc); /* turn off frame recv */
1243 ath_flushrecv(sc); /* flush recv queue */
1244
1245 if (!ah->curchan)
1246 ah->curchan = ath_get_curchannel(sc, hw);
1247
1248 spin_lock_bh(&sc->sc_resetlock);
1249 r = ath9k_hw_reset(ah, ah->curchan, false);
1250 if (r) {
1251 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1252 "Unable to reset channel %u (%uMhz) "
1253 "reset status %d\n",
1254 channel->center_freq, r);
1255 }
1256 spin_unlock_bh(&sc->sc_resetlock);
1257
1258 ath9k_hw_phy_disable(ah);
1259 ath9k_hw_configpcipowersave(ah, 1, 1);
1260 ath9k_ps_restore(sc);
1261 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
1262 }
1263
1264 /*******************/
1265 /* Rfkill */
1266 /*******************/
1267
1268 static bool ath_is_rfkill_set(struct ath_softc *sc)
1269 {
1270 struct ath_hw *ah = sc->sc_ah;
1271
1272 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1273 ah->rfkill_polarity;
1274 }
1275
1276 static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1277 {
1278 struct ath_wiphy *aphy = hw->priv;
1279 struct ath_softc *sc = aphy->sc;
1280 bool blocked = !!ath_is_rfkill_set(sc);
1281
1282 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1283 }
1284
1285 static void ath_start_rfkill_poll(struct ath_softc *sc)
1286 {
1287 struct ath_hw *ah = sc->sc_ah;
1288
1289 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1290 wiphy_rfkill_start_polling(sc->hw->wiphy);
1291 }
1292
1293 static void ath9k_uninit_hw(struct ath_softc *sc)
1294 {
1295 struct ath_hw *ah = sc->sc_ah;
1296
1297 BUG_ON(!ah);
1298
1299 ath9k_exit_debug(ah);
1300 ath9k_hw_detach(ah);
1301 sc->sc_ah = NULL;
1302 }
1303
1304 static void ath_clean_core(struct ath_softc *sc)
1305 {
1306 struct ieee80211_hw *hw = sc->hw;
1307 struct ath_hw *ah = sc->sc_ah;
1308 int i = 0;
1309
1310 ath9k_ps_wakeup(sc);
1311
1312 dev_dbg(sc->dev, "Detach ATH hw\n");
1313
1314 ath_deinit_leds(sc);
1315 wiphy_rfkill_stop_polling(sc->hw->wiphy);
1316
1317 for (i = 0; i < sc->num_sec_wiphy; i++) {
1318 struct ath_wiphy *aphy = sc->sec_wiphy[i];
1319 if (aphy == NULL)
1320 continue;
1321 sc->sec_wiphy[i] = NULL;
1322 ieee80211_unregister_hw(aphy->hw);
1323 ieee80211_free_hw(aphy->hw);
1324 }
1325 ieee80211_unregister_hw(hw);
1326 ath_rx_cleanup(sc);
1327 ath_tx_cleanup(sc);
1328
1329 tasklet_kill(&sc->intr_tq);
1330 tasklet_kill(&sc->bcon_tasklet);
1331
1332 if (!(sc->sc_flags & SC_OP_INVALID))
1333 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1334
1335 /* cleanup tx queues */
1336 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1337 if (ATH_TXQ_SETUP(sc, i))
1338 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1339
1340 if ((sc->btcoex.no_stomp_timer) &&
1341 ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1342 ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
1343 }
1344
1345 void ath_detach(struct ath_softc *sc)
1346 {
1347 ath_clean_core(sc);
1348 ath9k_uninit_hw(sc);
1349 }
1350
1351 void ath_cleanup(struct ath_softc *sc)
1352 {
1353 struct ath_hw *ah = sc->sc_ah;
1354 struct ath_common *common = ath9k_hw_common(ah);
1355
1356 ath_clean_core(sc);
1357 free_irq(sc->irq, sc);
1358 ath_bus_cleanup(common);
1359 kfree(sc->sec_wiphy);
1360 ieee80211_free_hw(sc->hw);
1361
1362 ath9k_uninit_hw(sc);
1363 }
1364
1365 static int ath9k_reg_notifier(struct wiphy *wiphy,
1366 struct regulatory_request *request)
1367 {
1368 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1369 struct ath_wiphy *aphy = hw->priv;
1370 struct ath_softc *sc = aphy->sc;
1371 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
1372
1373 return ath_reg_notifier_apply(wiphy, request, reg);
1374 }
1375
1376 /*
1377 * Detects if there is any priority bt traffic
1378 */
1379 static void ath_detect_bt_priority(struct ath_softc *sc)
1380 {
1381 struct ath_btcoex *btcoex = &sc->btcoex;
1382 struct ath_hw *ah = sc->sc_ah;
1383
1384 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
1385 btcoex->bt_priority_cnt++;
1386
1387 if (time_after(jiffies, btcoex->bt_priority_time +
1388 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
1389 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
1390 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
1391 "BT priority traffic detected");
1392 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
1393 } else {
1394 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
1395 }
1396
1397 btcoex->bt_priority_cnt = 0;
1398 btcoex->bt_priority_time = jiffies;
1399 }
1400 }
1401
1402 /*
1403 * Configures appropriate weight based on stomp type.
1404 */
1405 static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
1406 enum ath_stomp_type stomp_type)
1407 {
1408 struct ath_hw *ah = sc->sc_ah;
1409
1410 switch (stomp_type) {
1411 case ATH_BTCOEX_STOMP_ALL:
1412 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1413 AR_STOMP_ALL_WLAN_WGHT);
1414 break;
1415 case ATH_BTCOEX_STOMP_LOW:
1416 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1417 AR_STOMP_LOW_WLAN_WGHT);
1418 break;
1419 case ATH_BTCOEX_STOMP_NONE:
1420 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1421 AR_STOMP_NONE_WLAN_WGHT);
1422 break;
1423 default:
1424 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1425 "Invalid Stomptype\n");
1426 break;
1427 }
1428
1429 ath9k_hw_btcoex_enable(ah);
1430 }
1431
1432 static void ath9k_gen_timer_start(struct ath_hw *ah,
1433 struct ath_gen_timer *timer,
1434 u32 timer_next,
1435 u32 timer_period)
1436 {
1437 struct ath_common *common = ath9k_hw_common(ah);
1438 struct ath_softc *sc = (struct ath_softc *) common->priv;
1439
1440 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
1441
1442 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
1443 ath9k_hw_set_interrupts(ah, 0);
1444 sc->imask |= ATH9K_INT_GENTIMER;
1445 ath9k_hw_set_interrupts(ah, sc->imask);
1446 }
1447 }
1448
1449 static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
1450 {
1451 struct ath_common *common = ath9k_hw_common(ah);
1452 struct ath_softc *sc = (struct ath_softc *) common->priv;
1453 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
1454
1455 ath9k_hw_gen_timer_stop(ah, timer);
1456
1457 /* if no timer is enabled, turn off interrupt mask */
1458 if (timer_table->timer_mask.val == 0) {
1459 ath9k_hw_set_interrupts(ah, 0);
1460 sc->imask &= ~ATH9K_INT_GENTIMER;
1461 ath9k_hw_set_interrupts(ah, sc->imask);
1462 }
1463 }
1464
1465 /*
1466 * This is the master bt coex timer, which runs every 45 ms;
1467 * bt traffic is given priority during 55% of this period, while
1468 * wlan gets the remaining 45%
1469 */
1470 static void ath_btcoex_period_timer(unsigned long data)
1471 {
1472 struct ath_softc *sc = (struct ath_softc *) data;
1473 struct ath_hw *ah = sc->sc_ah;
1474 struct ath_btcoex *btcoex = &sc->btcoex;
1475
1476 ath_detect_bt_priority(sc);
1477
1478 spin_lock_bh(&btcoex->btcoex_lock);
1479
1480 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
1481
1482 spin_unlock_bh(&btcoex->btcoex_lock);
1483
1484 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
1485 if (btcoex->hw_timer_enabled)
1486 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
1487
1488 ath9k_gen_timer_start(ah,
1489 btcoex->no_stomp_timer,
1490 (ath9k_hw_gettsf32(ah) +
1491 btcoex->btcoex_no_stomp),
1492 btcoex->btcoex_no_stomp * 10);
1493 btcoex->hw_timer_enabled = true;
1494 }
1495
1496 mod_timer(&btcoex->period_timer, jiffies +
1497 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
1498 }
1499
1500 /*
1501 * Generic tsf based hw timer which configures weight
1502 * registers to time slice between wlan and bt traffic
1503 */
1504 static void ath_btcoex_no_stomp_timer(void *arg)
1505 {
1506 struct ath_softc *sc = (struct ath_softc *)arg;
1507 struct ath_hw *ah = sc->sc_ah;
1508 struct ath_btcoex *btcoex = &sc->btcoex;
1509
1510 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1511 "no stomp timer running \n");
1512
1513 spin_lock_bh(&btcoex->btcoex_lock);
1514
1515 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
1516 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
1517 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
1518 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
1519
1520 spin_unlock_bh(&btcoex->btcoex_lock);
1521 }
1522
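/*
 * Allocate the btcoex timers: convert the default BT period to
 * microseconds, derive the no-stomp window from the duty cycle, and set
 * up both the software period timer and the TSF-based no-stomp generic
 * timer.
 */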
1523 static int ath_init_btcoex_timer(struct ath_softc *sc)
1524 {
1525 struct ath_btcoex *btcoex = &sc->btcoex;
1526
1527 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
1528 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
1529 btcoex->btcoex_period / 100;
1530
1531 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
1532 (unsigned long) sc);
1533
1534 spin_lock_init(&btcoex->btcoex_lock);
1535
1536 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
1537 ath_btcoex_no_stomp_timer,
1538 ath_btcoex_no_stomp_timer,
1539 (void *) sc, AR_FIRST_NDP_TIMER);
1540
1541 if (!btcoex->no_stomp_timer)
1542 return -ENOMEM;
1543
1544 return 0;
1545 }
1546
1547 /*
1548 * Read and write, they both share the same lock. We do this to serialize
1549 * reads and writes on Atheros 802.11n PCI devices only. This is required
1550 * as the FIFO on these devices can sanely accept only 2 requests. After
1551 * that the device goes bananas. Serializing the reads/writes prevents this
1552 * from happening.
1553 */
1554
1555 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
1556 {
1557 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1558 struct ath_common *common = ath9k_hw_common(ah);
1559 struct ath_softc *sc = (struct ath_softc *) common->priv;
1560
1561 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1562 unsigned long flags;
1563 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1564 iowrite32(val, sc->mem + reg_offset);
1565 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1566 } else
1567 iowrite32(val, sc->mem + reg_offset);
1568 }
1569
1570 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
1571 {
1572 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1573 struct ath_common *common = ath9k_hw_common(ah);
1574 struct ath_softc *sc = (struct ath_softc *) common->priv;
1575 u32 val;
1576
1577 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1578 unsigned long flags;
1579 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1580 val = ioread32(sc->mem + reg_offset);
1581 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1582 } else
1583 val = ioread32(sc->mem + reg_offset);
1584 return val;
1585 }
1586
1587 static const struct ath_ops ath9k_common_ops = {
1588 .read = ath9k_ioread32,
1589 .write = ath9k_iowrite32,
1590 };
1591
1592 /*
1593 * Initialize and fill ath_softc. ath_softc is the
1594 * "Software Carrier" struct. Historically it has existed
1595 * to allow the separation between hardware specific
1596 * variables (now in ath_hw) and driver specific variables.
1597 */
1598 static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
1599 const struct ath_bus_ops *bus_ops)
1600 {
1601 struct ath_hw *ah = NULL;
1602 struct ath_common *common;
1603 int r = 0, i;
1604 int csz = 0;
1605 int qnum;
1606
1607 /* XXX: hardware will not be ready until ath_open() is called */
1608 sc->sc_flags |= SC_OP_INVALID;
1609
1610 spin_lock_init(&sc->wiphy_lock);
1611 spin_lock_init(&sc->sc_resetlock);
1612 spin_lock_init(&sc->sc_serial_rw);
1613 spin_lock_init(&sc->ani_lock);
1614 spin_lock_init(&sc->sc_pm_lock);
1615 mutex_init(&sc->mutex);
1616 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1617 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1618 (unsigned long)sc);
1619
1620 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
1621 if (!ah)
1622 return -ENOMEM;
1623
1624 ah->hw_version.devid = devid;
1625 ah->hw_version.subsysid = subsysid;
1626 sc->sc_ah = ah;
1627
1628 common = ath9k_hw_common(ah);
1629 common->ops = &ath9k_common_ops;
1630 common->bus_ops = bus_ops;
1631 common->ah = ah;
1632 common->hw = sc->hw;
1633 common->priv = sc;
1634 common->debug_mask = ath9k_debug;
1635
1636 /*
1637 * Cache line size is used to size and align various
1638 * structures used to communicate with the hardware.
1639 */
1640 ath_read_cachesize(common, &csz);
1641 /* XXX assert csz is non-zero */
1642 common->cachelsz = csz << 2; /* convert to bytes */
1643
1644 r = ath9k_hw_init(ah);
1645 if (r) {
1646 ath_print(common, ATH_DBG_FATAL,
1647 "Unable to initialize hardware; "
1648 "initialization status: %d\n", r);
1649 goto bad_free_hw;
1650 }
1651
1652 if (ath9k_init_debug(ah) < 0) {
1653 ath_print(common, ATH_DBG_FATAL,
1654 "Unable to create debugfs files\n");
1655 goto bad_free_hw;
1656 }
1657
1658 /* Get the hardware key cache size. */
1659 common->keymax = ah->caps.keycache_size;
1660 if (common->keymax > ATH_KEYMAX) {
1661 ath_print(common, ATH_DBG_ANY,
1662 "Warning, using only %u entries in %u key cache\n",
1663 ATH_KEYMAX, common->keymax);
1664 common->keymax = ATH_KEYMAX;
1665 }
1666
1667 /*
1668 * Reset the key cache since some parts do not
1669 * reset the contents on initial power up.
1670 */
1671 for (i = 0; i < common->keymax; i++)
1672 ath9k_hw_keyreset(ah, (u16) i);
1673
1674 /* default to MONITOR mode */
1675 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1676
1677 /*
1678 * Allocate hardware transmit queues: one queue for
1679 * beacon frames and one data queue for each QoS
1680 * priority. Note that the hal handles resetting
1681 * these queues at the needed time.
1682 */
1683 sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
1684 if (sc->beacon.beaconq == -1) {
1685 ath_print(common, ATH_DBG_FATAL,
1686 "Unable to setup a beacon xmit queue\n");
1687 r = -EIO;
1688 goto bad2;
1689 }
1690 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1691 if (sc->beacon.cabq == NULL) {
1692 ath_print(common, ATH_DBG_FATAL,
1693 "Unable to setup CAB xmit queue\n");
1694 r = -EIO;
1695 goto bad2;
1696 }
1697
1698 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1699 ath_cabq_update(sc);
1700
1701 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1702 sc->tx.hwq_map[i] = -1;
1703
1704 /* Setup data queues */
1705 /* NB: ensure BK queue is the lowest priority h/w queue */
1706 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1707 ath_print(common, ATH_DBG_FATAL,
1708 "Unable to setup xmit queue for BK traffic\n");
1709 r = -EIO;
1710 goto bad2;
1711 }
1712
1713 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1714 ath_print(common, ATH_DBG_FATAL,
1715 "Unable to setup xmit queue for BE traffic\n");
1716 r = -EIO;
1717 goto bad2;
1718 }
1719 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1720 ath_print(common, ATH_DBG_FATAL,
1721 "Unable to setup xmit queue for VI traffic\n");
1722 r = -EIO;
1723 goto bad2;
1724 }
1725 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1726 ath_print(common, ATH_DBG_FATAL,
1727 "Unable to setup xmit queue for VO traffic\n");
1728 r = -EIO;
1729 goto bad2;
1730 }
1731
1732 /* Initializes the noise floor to a reasonable default value.
1733 * Later on this will be updated during ANI processing. */
1734
1735 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1736 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1737
1738 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1739 ATH9K_CIPHER_TKIP, NULL)) {
1740 /*
1741 * Whether we should enable h/w TKIP MIC.
1742 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1743 * report WMM capable, so it's always safe to turn on
1744 * TKIP MIC in this case.
1745 */
1746 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1747 0, 1, NULL);
1748 }
1749
1750 /*
1751 * Check whether the separate key cache entries
1752 * are required to handle both tx+rx MIC keys.
1753 * With split mic keys the number of stations is limited
1754 * to 27 otherwise 59.
1755 */
1756 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1757 ATH9K_CIPHER_TKIP, NULL)
1758 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1759 ATH9K_CIPHER_MIC, NULL)
1760 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1761 0, NULL))
1762 common->splitmic = 1;
1763
1764 /* turn on mcast key search if possible */
1765 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1766 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1767 1, NULL);
1768
1769 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1770
1771 /* 11n Capabilities */
1772 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1773 sc->sc_flags |= SC_OP_TXAGGR;
1774 sc->sc_flags |= SC_OP_RXAGGR;
1775 }
1776
1777 common->tx_chainmask = ah->caps.tx_chainmask;
1778 common->rx_chainmask = ah->caps.rx_chainmask;
1779
1780 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1781 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1782
1783 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1784 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
1785
1786 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1787
1788 /* initialize beacon slots */
1789 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1790 sc->beacon.bslot[i] = NULL;
1791 sc->beacon.bslot_aphy[i] = NULL;
1792 }
1793
1794 /* setup channels and rates */
1795
1796 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
1797 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1798 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1799 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1800 ARRAY_SIZE(ath9k_2ghz_chantable);
1801 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
1802 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
1803 ARRAY_SIZE(ath9k_legacy_rates);
1804 }
1805
1806 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1807 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1808 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1809 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1810 ARRAY_SIZE(ath9k_5ghz_chantable);
1811 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1812 ath9k_legacy_rates + 4;
1813 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
1814 ARRAY_SIZE(ath9k_legacy_rates) - 4;
1815 }
1816
1817 switch (ah->btcoex_hw.scheme) {
1818 case ATH_BTCOEX_CFG_NONE:
1819 break;
1820 case ATH_BTCOEX_CFG_2WIRE:
1821 ath9k_hw_btcoex_init_2wire(ah);
1822 break;
1823 case ATH_BTCOEX_CFG_3WIRE:
1824 ath9k_hw_btcoex_init_3wire(ah);
1825 r = ath_init_btcoex_timer(sc);
1826 if (r)
1827 goto bad2;
1828 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
1829 ath9k_hw_init_btcoex_hw(ah, qnum);
1830 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
1831 break;
1832 default:
1833 WARN_ON(1);
1834 break;
1835 }
1836
1837 return 0;
1838 bad2:
1839 /* cleanup tx queues */
1840 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1841 if (ATH_TXQ_SETUP(sc, i))
1842 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1843
1844 bad_free_hw:
1845 ath9k_uninit_hw(sc);
1846 return r;
1847 }
1848
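/*
 * Fill in the mac80211 hardware capabilities: feature flags, supported
 * interface modes, queue/rate limits, the rate control algorithm, and
 * the 2 GHz / 5 GHz band structures this device supports.
 */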
1849 void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1850 {
1851 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1852 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1853 IEEE80211_HW_SIGNAL_DBM |
1854 IEEE80211_HW_AMPDU_AGGREGATION |
1855 IEEE80211_HW_SUPPORTS_PS |
1856 IEEE80211_HW_PS_NULLFUNC_STACK |
1857 IEEE80211_HW_SPECTRUM_MGMT;
1858
1859 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1860 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1861
1862 hw->wiphy->interface_modes =
1863 BIT(NL80211_IFTYPE_AP) |
1864 BIT(NL80211_IFTYPE_STATION) |
1865 BIT(NL80211_IFTYPE_ADHOC) |
1866 BIT(NL80211_IFTYPE_MESH_POINT);
1867
1868 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1869
1870 hw->queues = 4;
1871 hw->max_rates = 4;
1872 hw->channel_change_time = 5000;
1873 hw->max_listen_interval = 10;
1874 /* Hardware supports 10 but we use 4 */
1875 hw->max_rate_tries = 4;
1876 hw->sta_data_size = sizeof(struct ath_node);
1877 hw->vif_data_size = sizeof(struct ath_vif);
1878
1879 hw->rate_control_algorithm = "ath9k_rate_control";
1880
1881 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
1882 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1883 &sc->sbands[IEEE80211_BAND_2GHZ];
1884 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1885 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1886 &sc->sbands[IEEE80211_BAND_5GHZ];
1887 }
1888
1889 /* Device driver core initialization */
1890 int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
1891 const struct ath_bus_ops *bus_ops)
1892 {
1893 struct ieee80211_hw *hw = sc->hw;
1894 struct ath_common *common;
1895 struct ath_hw *ah;
1896 int error = 0, i;
1897 struct ath_regulatory *reg;
1898
1899 dev_dbg(sc->dev, "Attach ATH hw\n");
1900
1901 error = ath_init_softc(devid, sc, subsysid, bus_ops);
1902 if (error != 0)
1903 return error;
1904
1905 ah = sc->sc_ah;
1906 common = ath9k_hw_common(ah);
1907
1908 /* get mac address from hardware and set in mac80211 */
1909
1910 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
1911
1912 ath_set_hw_capab(sc, hw);
1913
1914 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
1915 ath9k_reg_notifier);
1916 if (error)
1917 return error;
1918
1919 reg = &common->regulatory;
1920
1921 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1922 if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
1923 setup_ht_cap(sc,
1924 &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1925 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
1926 setup_ht_cap(sc,
1927 &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1928 }
1929
1930 /* initialize tx/rx engine */
1931 error = ath_tx_init(sc, ATH_TXBUF);
1932 if (error != 0)
1933 goto error_attach;
1934
1935 error = ath_rx_init(sc, ATH_RXBUF);
1936 if (error != 0)
1937 goto error_attach;
1938
1939 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1940 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1941 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1942
1943 error = ieee80211_register_hw(hw);
if (error)
goto error_attach;
1944
1945 if (!ath_is_world_regd(reg)) {
1946 error = regulatory_hint(hw->wiphy, reg->alpha2);
1947 if (error)
1948 goto error_attach;
1949 }
1950
1951 /* Initialize LED control */
1952 ath_init_leds(sc);
1953
1954 ath_start_rfkill_poll(sc);
1955
1956 return 0;
1957
1958 error_attach:
1959 /* cleanup tx queues */
1960 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1961 if (ATH_TXQ_SETUP(sc, i))
1962 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1963
1964 ath9k_uninit_hw(sc);
1965
1966 return error;
1967 }
1968
1969 int ath_reset(struct ath_softc *sc, bool retry_tx)
1970 {
1971 struct ath_hw *ah = sc->sc_ah;
1972 struct ath_common *common = ath9k_hw_common(ah);
1973 struct ieee80211_hw *hw = sc->hw;
1974 int r;
1975
1976 /* Stop ANI */
1977 del_timer_sync(&common->ani.timer);
1978
1979 ath9k_hw_set_interrupts(ah, 0);
1980 ath_drain_all_txq(sc, retry_tx);
1981 ath_stoprecv(sc);
1982 ath_flushrecv(sc);
1983
1984 spin_lock_bh(&sc->sc_resetlock);
1985 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1986 if (r)
1987 ath_print(common, ATH_DBG_FATAL,
1988 "Unable to reset hardware; reset status %d\n", r);
1989 spin_unlock_bh(&sc->sc_resetlock);
1990
1991 if (ath_startrecv(sc) != 0)
1992 ath_print(common, ATH_DBG_FATAL,
1993 "Unable to start recv logic\n");
1994
1995 /*
1996 * We may be doing a reset in response to a request
1997 * that changes the channel so update any state that
1998 * might change as a result.
1999 */
2000 ath_cache_conf_rate(sc, &hw->conf);
2001
2002 ath_update_txpow(sc);
2003
2004 if (sc->sc_flags & SC_OP_BEACONS)
2005 ath_beacon_config(sc, NULL); /* restart beacons */
2006
2007 ath9k_hw_set_interrupts(ah, sc->imask);
2008
2009 if (retry_tx) {
2010 int i;
2011 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2012 if (ATH_TXQ_SETUP(sc, i)) {
2013 spin_lock_bh(&sc->tx.txq[i].axq_lock);
2014 ath_txq_schedule(sc, &sc->tx.txq[i]);
2015 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
2016 }
2017 }
2018 }
2019
2020 /* Start ANI */
2021 ath_start_ani(common);
2022
2023 return r;
2024 }
2025
2026 /*
2027 * This function allocates both the coherent DMA memory that holds the
2028 * hardware descriptors and the array of ath_buf entries that point into
2029 * it; the initialized buffers are chained onto the caller's list head.
2030 */
2031 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
2032 struct list_head *head, const char *name,
2033 int nbuf, int ndesc)
2034 {
2035 #define DS2PHYS(_dd, _ds) \
2036 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2037 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
2038 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
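/*
 * Worked example for the macros above: the hardware fetches up to
 * 32 DWORDs (128 bytes) per descriptor, so a descriptor whose bus
 * address ends in 0xF80 or higher would cross into the next 4 KB page.
 * ATH_DESC_4KB_BOUND_CHECK(0x1F80) = (0xF80 > 0xF7F) = 1, while
 * ATH_DESC_4KB_BOUND_CHECK(0x1F00) = 0.
 */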
2039 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2040 struct ath_desc *ds;
2041 struct ath_buf *bf;
2042 int i, bsize, error;
2043
2044 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
2045 name, nbuf, ndesc);
2046
2047 INIT_LIST_HEAD(head);
2048 /* the size of ath_desc must be a multiple of a DWORD (4 bytes) */
2049 if ((sizeof(struct ath_desc) % 4) != 0) {
2050 ath_print(common, ATH_DBG_FATAL,
2051 "ath_desc not DWORD aligned\n");
2052 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
2053 error = -ENOMEM;
2054 goto fail;
2055 }
2056
2057 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
2058
2059 /*
2060 * Need additional DMA memory because we can't use
2061 * descriptors that cross the 4K page boundary. Assume
2062 * one skipped descriptor per 4K page.
2063 */
2064 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
2065 u32 ndesc_skipped =
2066 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
2067 u32 dma_len;
2068
2069 while (ndesc_skipped) {
2070 dma_len = ndesc_skipped * sizeof(struct ath_desc);
2071 dd->dd_desc_len += dma_len;
2072
2073 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
2074 }
2075 }
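/*
 * Example of the slack calculation above (sizes illustrative): a
 * descriptor block spanning four 4 KB pages causes four descriptors to
 * be skipped, so four descriptors' worth of extra length is added; the
 * loop then re-checks whether that extra length itself spills onto
 * additional pages and stops once no further skips are needed.
 */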
2076
2077 /* allocate descriptors */
2078 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2079 &dd->dd_desc_paddr, GFP_KERNEL);
2080 if (dd->dd_desc == NULL) {
2081 error = -ENOMEM;
2082 goto fail;
2083 }
2084 ds = dd->dd_desc;
2085 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
2086 name, ds, (u32) dd->dd_desc_len,
2087 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
2088
2089 /* allocate buffers */
2090 bsize = sizeof(struct ath_buf) * nbuf;
2091 bf = kzalloc(bsize, GFP_KERNEL);
2092 if (bf == NULL) {
2093 error = -ENOMEM;
2094 goto fail2;
2095 }
2096 dd->dd_bufptr = bf;
2097
2098 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
2099 bf->bf_desc = ds;
2100 bf->bf_daddr = DS2PHYS(dd, ds);
2101
2102 if (!(sc->sc_ah->caps.hw_caps &
2103 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
2104 /*
2105 * Skip descriptor addresses which can cause 4KB
2106 * boundary crossing (addr + length) with a 32 dword
2107 * descriptor fetch.
2108 */
2109 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
2110 BUG_ON((caddr_t) bf->bf_desc >=
2111 ((caddr_t) dd->dd_desc +
2112 dd->dd_desc_len));
2113
2114 ds += ndesc;
2115 bf->bf_desc = ds;
2116 bf->bf_daddr = DS2PHYS(dd, ds);
2117 }
2118 }
2119 list_add_tail(&bf->list, head);
2120 }
2121 return 0;
2122 fail2:
2123 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2124 dd->dd_desc_paddr);
2125 fail:
2126 memset(dd, 0, sizeof(*dd));
2127 return error;
2128 #undef ATH_DESC_4KB_BOUND_CHECK
2129 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
2130 #undef DS2PHYS
2131 }
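/*
 * Rough usage sketch (field and argument names illustrative; see the
 * real tx/rx init paths for the actual callers):
 *
 *	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
 *				  "tx", nbufs, 1);
 *	if (error)
 *		goto fail;
 *
 * On success every ath_buf on the list has bf_desc/bf_daddr pointing
 * at a 4 KB-safe slice of the coherent descriptor block.
 */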
2132
2133 void ath_descdma_cleanup(struct ath_softc *sc,
2134 struct ath_descdma *dd,
2135 struct list_head *head)
2136 {
2137 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2138 dd->dd_desc_paddr);
2139
2140 INIT_LIST_HEAD(head);
2141 kfree(dd->dd_bufptr);
2142 memset(dd, 0, sizeof(*dd));
2143 }
2144
2145 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
2146 {
2147 int qnum;
2148
2149 switch (queue) {
2150 case 0:
2151 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
2152 break;
2153 case 1:
2154 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
2155 break;
2156 case 2:
2157 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
2158 break;
2159 case 3:
2160 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
2161 break;
2162 default:
2163 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
2164 break;
2165 }
2166
2167 return qnum;
2168 }
2169
2170 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
2171 {
2172 int qnum;
2173
2174 switch (queue) {
2175 case ATH9K_WME_AC_VO:
2176 qnum = 0;
2177 break;
2178 case ATH9K_WME_AC_VI:
2179 qnum = 1;
2180 break;
2181 case ATH9K_WME_AC_BE:
2182 qnum = 2;
2183 break;
2184 case ATH9K_WME_AC_BK:
2185 qnum = 3;
2186 break;
2187 default:
2188 qnum = -1;
2189 break;
2190 }
2191
2192 return qnum;
2193 }
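/*
 * The two helpers above are inverses of each other: mac80211 queue 0
 * maps to the VO (voice) hardware queue, 1 to VI (video), 2 to BE
 * (best effort) and 3 to BK (background), with ath_get_hal_qnum()
 * falling back to BE for anything unexpected.
 */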
2194
2195 /* XXX: Remove me once we don't depend on ath9k_channel for all
2196 * this redundant data */
2197 void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
2198 struct ath9k_channel *ichan)
2199 {
2200 struct ieee80211_channel *chan = hw->conf.channel;
2201 struct ieee80211_conf *conf = &hw->conf;
2202
2203 ichan->channel = chan->center_freq;
2204 ichan->chan = chan;
2205
2206 if (chan->band == IEEE80211_BAND_2GHZ) {
2207 ichan->chanmode = CHANNEL_G;
2208 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
2209 } else {
2210 ichan->chanmode = CHANNEL_A;
2211 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
2212 }
2213
2214 if (conf_is_ht(conf))
2215 ichan->chanmode = ath_get_extchanmode(sc, chan,
2216 conf->channel_type);
2217 }
2218
2219 /**********************/
2220 /* mac80211 callbacks */
2221 /**********************/
2222
2223 /*
2224 * (Re)start btcoex timers
2225 */
2226 static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
2227 {
2228 struct ath_btcoex *btcoex = &sc->btcoex;
2229 struct ath_hw *ah = sc->sc_ah;
2230
2231 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
2232 "Starting btcoex timers\n");
2233
2234 /* make sure duty cycle timer is also stopped when resuming */
2235 if (btcoex->hw_timer_enabled)
2236 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
2237
2238 btcoex->bt_priority_cnt = 0;
2239 btcoex->bt_priority_time = jiffies;
2240 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
2241
2242 mod_timer(&btcoex->period_timer, jiffies);
2243 }
2244
2245 static int ath9k_start(struct ieee80211_hw *hw)
2246 {
2247 struct ath_wiphy *aphy = hw->priv;
2248 struct ath_softc *sc = aphy->sc;
2249 struct ath_hw *ah = sc->sc_ah;
2250 struct ath_common *common = ath9k_hw_common(ah);
2251 struct ieee80211_channel *curchan = hw->conf.channel;
2252 struct ath9k_channel *init_channel;
2253 int r;
2254
2255 ath_print(common, ATH_DBG_CONFIG,
2256 "Starting driver with initial channel: %d MHz\n",
2257 curchan->center_freq);
2258
2259 mutex_lock(&sc->mutex);
2260
2261 if (ath9k_wiphy_started(sc)) {
2262 if (sc->chan_idx == curchan->hw_value) {
2263 /*
2264 * Already on the operational channel, the new wiphy
2265 * can be marked active.
2266 */
2267 aphy->state = ATH_WIPHY_ACTIVE;
2268 ieee80211_wake_queues(hw);
2269 } else {
2270 /*
2271 * Another wiphy is on another channel, start the new
2272 * wiphy in paused state.
2273 */
2274 aphy->state = ATH_WIPHY_PAUSED;
2275 ieee80211_stop_queues(hw);
2276 }
2277 mutex_unlock(&sc->mutex);
2278 return 0;
2279 }
2280 aphy->state = ATH_WIPHY_ACTIVE;
2281
2282 /* setup initial channel */
2283
2284 sc->chan_idx = curchan->hw_value;
2285
2286 init_channel = ath_get_curchannel(sc, hw);
2287
2288 /* Reset SERDES registers */
2289 ath9k_hw_configpcipowersave(ah, 0, 0);
2290
2291 /*
2292 * The basic interface to setting the hardware in a good
2293 * state is ``reset''. On return the hardware is known to
2294 * be powered up and with interrupts disabled. This must
2295 * be followed by initialization of the appropriate bits
2296 * and then setup of the interrupt mask.
2297 */
2298 spin_lock_bh(&sc->sc_resetlock);
2299 r = ath9k_hw_reset(ah, init_channel, false);
2300 if (r) {
2301 ath_print(common, ATH_DBG_FATAL,
2302 "Unable to reset hardware; reset status %d "
2303 "(freq %u MHz)\n", r,
2304 curchan->center_freq);
2305 spin_unlock_bh(&sc->sc_resetlock);
2306 goto mutex_unlock;
2307 }
2308 spin_unlock_bh(&sc->sc_resetlock);
2309
2310 /*
2311 * This is needed only to setup initial state
2312 * but it's best done after a reset.
2313 */
2314 ath_update_txpow(sc);
2315
2316 /*
2317 * Setup the hardware after reset:
2318 * The receive engine is set going.
2319 * Frame transmit is handled entirely
2320 * in the frame output path; there's nothing to do
2321 * here except setup the interrupt mask.
2322 */
2323 if (ath_startrecv(sc) != 0) {
2324 ath_print(common, ATH_DBG_FATAL,
2325 "Unable to start recv logic\n");
2326 r = -EIO;
2327 goto mutex_unlock;
2328 }
2329
2330 /* Setup our intr mask. */
2331 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
2332 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
2333 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
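/*
 * The base mask covers rx/tx completions, the rx end-of-list and
 * overrun conditions (RXEOL/RXORN), fatal hardware errors and the
 * global interrupt enable. GTT (global transmit timeout) and CST
 * (carrier sense timeout) are only requested below when the relevant
 * capabilities are present.
 */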
2334
2335 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
2336 sc->imask |= ATH9K_INT_GTT;
2337
2338 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
2339 sc->imask |= ATH9K_INT_CST;
2340
2341 ath_cache_conf_rate(sc, &hw->conf);
2342
2343 sc->sc_flags &= ~SC_OP_INVALID;
2344
2345 /* Disable BMISS interrupt when we're not associated */
2346 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2347 ath9k_hw_set_interrupts(ah, sc->imask);
2348
2349 ieee80211_wake_queues(hw);
2350
2351 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
2352
2353 if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
2354 !ah->btcoex_hw.enabled) {
2355 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
2356 AR_STOMP_LOW_WLAN_WGHT);
2357 ath9k_hw_btcoex_enable(ah);
2358
2359 if (common->bus_ops->bt_coex_prep)
2360 common->bus_ops->bt_coex_prep(common);
2361 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
2362 ath9k_btcoex_timer_resume(sc);
2363 }
2364
2365 mutex_unlock:
2366 mutex_unlock(&sc->mutex);
2367
2368 return r;
2369 }
2370
2371 static int ath9k_tx(struct ieee80211_hw *hw,
2372 struct sk_buff *skb)
2373 {
2374 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2375 struct ath_wiphy *aphy = hw->priv;
2376 struct ath_softc *sc = aphy->sc;
2377 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2378 struct ath_tx_control txctl;
2379 int padpos, padsize;
2380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2381
2382 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
2383 ath_print(common, ATH_DBG_XMIT,
2384 "ath9k: %s: TX in unexpected wiphy state "
2385 "%d\n", wiphy_name(hw->wiphy), aphy->state);
2386 goto exit;
2387 }
2388
2389 if (sc->ps_enabled) {
2390 /*
2391 * mac80211 does not set PM field for normal data frames, so we
2392 * need to update that based on the current PS mode.
2393 */
2394 if (ieee80211_is_data(hdr->frame_control) &&
2395 !ieee80211_is_nullfunc(hdr->frame_control) &&
2396 !ieee80211_has_pm(hdr->frame_control)) {
2397 ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame "
2398 "while in PS mode\n");
2399 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
2400 }
2401 }
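/*
 * Setting the PM bit keeps the AP buffering frames for us while we
 * doze; without it the AP would assume the station is awake and
 * deliver frames we would never receive.
 */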
2402
2403 if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
2404 /*
2405 * We are using PS-Poll and mac80211 can request TX while in
2406 * power save mode. Need to wake up hardware for the TX to be
2407 * completed and if needed, also for RX of buffered frames.
2408 */
2409 ath9k_ps_wakeup(sc);
2410 ath9k_hw_setrxabort(sc->sc_ah, 0);
2411 if (ieee80211_is_pspoll(hdr->frame_control)) {
2412 ath_print(common, ATH_DBG_PS,
2413 "Sending PS-Poll to pick a buffered frame\n");
2414 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
2415 } else {
2416 ath_print(common, ATH_DBG_PS,
2417 "Wake up to complete TX\n");
2418 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
2419 }
2420 /*
2421 * The actual restore operation will happen only after
2422 * the sc_flags bit is cleared. We are just dropping
2423 * the ps_usecount here.
2424 */
2425 ath9k_ps_restore(sc);
2426 }
2427
2428 memset(&txctl, 0, sizeof(struct ath_tx_control));
2429
2430 /*
2431 * As a temporary workaround, assign seq# here; this will likely need
2432 * to be cleaned up to work better with Beacon transmission and virtual
2433 * BSSes.
2434 */
2435 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2436 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2437 sc->tx.seq_no += 0x10;
2438 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2439 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2440 }
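/*
 * seq_ctrl packs a 4-bit fragment number in its low bits and a 12-bit
 * sequence number above it, so stepping sc->tx.seq_no by 0x10 bumps
 * the sequence number by one while the IEEE80211_SCTL_FRAG mask above
 * preserves the fragment number of the frame being sent.
 */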
2441
2442 /* Add the padding after the header if this is not already done */
2443 padpos = ath9k_cmn_padpos(hdr->frame_control);
2444 padsize = padpos & 3;
2445 if (padsize && skb->len > padpos) {
2446 if (skb_headroom(skb) < padsize)
2447 return -1;
2448 skb_push(skb, padsize);
2449 memmove(skb->data, skb->data + padsize, padpos);
2450 }
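/*
 * Worked example of the padding above: a (non-4-address) QoS data
 * header is 26 bytes, so padpos = 26 and padsize = 26 & 3 = 2; two
 * bytes are pushed and the header is moved down so the payload starts
 * on a 4-byte boundary. Because 802.11 header lengths are even,
 * (padpos & 3) and (4 - padpos % 4) give the same amount whenever
 * padding is needed at all.
 */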
2451
2452 /* Check if a tx queue is available */
2453
2454 txctl.txq = ath_test_get_txq(sc, skb);
2455 if (!txctl.txq)
2456 goto exit;
2457
2458 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2459
2460 if (ath_tx_start(hw, skb, &txctl) != 0) {
2461 ath_print(common, ATH_DBG_XMIT, "TX failed\n");
2462 goto exit;
2463 }
2464
2465 return 0;
2466 exit:
2467 dev_kfree_skb_any(skb);
2468 return 0;
2469 }
2470
2471 /*
2472 * Pause btcoex timer and bt duty cycle timer
2473 */
2474 static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
2475 {
2476 struct ath_btcoex *btcoex = &sc->btcoex;
2477 struct ath_hw *ah = sc->sc_ah;
2478
2479 del_timer_sync(&btcoex->period_timer);
2480
2481 if (btcoex->hw_timer_enabled)
2482 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
2483
2484 btcoex->hw_timer_enabled = false;
2485 }
2486
2487 static void ath9k_stop(struct ieee80211_hw *hw)
2488 {
2489 struct ath_wiphy *aphy = hw->priv;
2490 struct ath_softc *sc = aphy->sc;
2491 struct ath_hw *ah = sc->sc_ah;
2492 struct ath_common *common = ath9k_hw_common(ah);
2493
2494 mutex_lock(&sc->mutex);
2495
2496 aphy->state = ATH_WIPHY_INACTIVE;
2497
2498 cancel_delayed_work_sync(&sc->ath_led_blink_work);
2499 cancel_delayed_work_sync(&sc->tx_complete_work);
2500
2501 if (!sc->num_sec_wiphy) {
2502 cancel_delayed_work_sync(&sc->wiphy_work);
2503 cancel_work_sync(&sc->chan_work);
2504 }
2505
2506 if (sc->sc_flags & SC_OP_INVALID) {
2507 ath_print(common, ATH_DBG_ANY, "Device not present\n");
2508 mutex_unlock(&sc->mutex);
2509 return;
2510 }
2511
2512 if (ath9k_wiphy_started(sc)) {
2513 mutex_unlock(&sc->mutex);
2514 return; /* another wiphy still in use */
2515 }
2516
2517 /* Ensure HW is awake when we try to shut it down. */
2518 ath9k_ps_wakeup(sc);
2519
2520 if (ah->btcoex_hw.enabled) {
2521 ath9k_hw_btcoex_disable(ah);
2522 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
2523 ath9k_btcoex_timer_pause(sc);
2524 }
2525
2526 /* make sure h/w will not generate any interrupt
2527 * before setting the invalid flag. */
2528 ath9k_hw_set_interrupts(ah, 0);
2529
2530 if (!(sc->sc_flags & SC_OP_INVALID)) {
2531 ath_drain_all_txq(sc, false);
2532 ath_stoprecv(sc);
2533 ath9k_hw_phy_disable(ah);
2534 } else
2535 sc->rx.rxlink = NULL;
2536
2537 /* disable HAL and put h/w to sleep */
2538 ath9k_hw_disable(ah);
2539 ath9k_hw_configpcipowersave(ah, 1, 1);
2540 ath9k_ps_restore(sc);
2541
2542 /* Finally, put the chip in FULL SLEEP mode */
2543 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
2544
2545 sc->sc_flags |= SC_OP_INVALID;
2546
2547 mutex_unlock(&sc->mutex);
2548
2549 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
2550 }
2551
2552 static int ath9k_add_interface(struct ieee80211_hw *hw,
2553 struct ieee80211_if_init_conf *conf)
2554 {
2555 struct ath_wiphy *aphy = hw->priv;
2556 struct ath_softc *sc = aphy->sc;
2557 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2558 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2559 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2560 int ret = 0;
2561
2562 mutex_lock(&sc->mutex);
2563
2564 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
2565 sc->nvifs > 0) {
2566 ret = -ENOBUFS;
2567 goto out;
2568 }
2569
2570 switch (conf->type) {
2571 case NL80211_IFTYPE_STATION:
2572 ic_opmode = NL80211_IFTYPE_STATION;
2573 break;
2574 case NL80211_IFTYPE_ADHOC:
2575 case NL80211_IFTYPE_AP:
2576 case NL80211_IFTYPE_MESH_POINT:
2577 if (sc->nbcnvifs >= ATH_BCBUF) {
2578 ret = -ENOBUFS;
2579 goto out;
2580 }
2581 ic_opmode = conf->type;
2582 break;
2583 default:
2584 ath_print(common, ATH_DBG_FATAL,
2585 "Interface type %d not yet supported\n", conf->type);
2586 ret = -EOPNOTSUPP;
2587 goto out;
2588 }
2589
2590 ath_print(common, ATH_DBG_CONFIG,
2591 "Attach a VIF of type: %d\n", ic_opmode);
2592
2593 /* Set the VIF opmode */
2594 avp->av_opmode = ic_opmode;
2595 avp->av_bslot = -1;
2596
2597 sc->nvifs++;
2598
2599 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
2600 ath9k_set_bssid_mask(hw);
2601
2602 if (sc->nvifs > 1)
2603 goto out; /* skip global settings for secondary vif */
2604
2605 if (ic_opmode == NL80211_IFTYPE_AP) {
2606 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
2607 sc->sc_flags |= SC_OP_TSF_RESET;
2608 }
2609
2610 /* Set the device opmode */
2611 sc->sc_ah->opmode = ic_opmode;
2612
2613 /*
2614 * Enable MIB interrupts when there are hardware phy counters.
2615 * Note we only do this (at the moment) for station, IBSS and mesh modes.
2616 */
2617 if ((conf->type == NL80211_IFTYPE_STATION) ||
2618 (conf->type == NL80211_IFTYPE_ADHOC) ||
2619 (conf->type == NL80211_IFTYPE_MESH_POINT)) {
2620 sc->imask |= ATH9K_INT_MIB;
2621 sc->imask |= ATH9K_INT_TSFOOR;
2622 }
2623
2624 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2625
2626 if (conf->type == NL80211_IFTYPE_AP ||
2627 conf->type == NL80211_IFTYPE_ADHOC ||
2628 conf->type == NL80211_IFTYPE_MONITOR)
2629 ath_start_ani(common);
2630
2631 out:
2632 mutex_unlock(&sc->mutex);
2633 return ret;
2634 }
2635
2636 static void ath9k_remove_interface(struct ieee80211_hw *hw,
2637 struct ieee80211_if_init_conf *conf)
2638 {
2639 struct ath_wiphy *aphy = hw->priv;
2640 struct ath_softc *sc = aphy->sc;
2641 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2642 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2643 int i;
2644
2645 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
2646
2647 mutex_lock(&sc->mutex);
2648
2649 /* Stop ANI */
2650 del_timer_sync(&common->ani.timer);
2651
2652 /* Reclaim beacon resources */
2653 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2654 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2655 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2656 ath9k_ps_wakeup(sc);
2657 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2658 ath_beacon_return(sc, avp);
2659 ath9k_ps_restore(sc);
2660 }
2661
2662 sc->sc_flags &= ~SC_OP_BEACONS;
2663
2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2665 if (sc->beacon.bslot[i] == conf->vif) {
2666 printk(KERN_DEBUG "%s: vif had allocated beacon "
2667 "slot\n", __func__);
2668 sc->beacon.bslot[i] = NULL;
2669 sc->beacon.bslot_aphy[i] = NULL;
2670 }
2671 }
2672
2673 sc->nvifs--;
2674
2675 mutex_unlock(&sc->mutex);
2676 }
2677
2678 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2679 {
2680 struct ath_wiphy *aphy = hw->priv;
2681 struct ath_softc *sc = aphy->sc;
2682 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2683 struct ieee80211_conf *conf = &hw->conf;
2684 struct ath_hw *ah = sc->sc_ah;
2685 bool disable_radio;
2686
2687 mutex_lock(&sc->mutex);
2688
2689 /*
2690 * Leave this as the first check because we need to turn on the
2691 * radio if it was disabled before prior to processing the rest
2692 * of the changes. Likewise we must only disable the radio towards
2693 * the end.
2694 */
2695 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
2696 bool enable_radio = false;
2697 bool all_wiphys_idle;
2698 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
2699
2700 spin_lock_bh(&sc->wiphy_lock);
2701 all_wiphys_idle = ath9k_all_wiphys_idle(sc);
2702 ath9k_set_wiphy_idle(aphy, idle);
2703
2704 if (!idle && all_wiphys_idle)
2705 enable_radio = true;
2706
2707 /*
2708 * After we unlock here it's possible another wiphy
2709 * can be re-enabled, so to account for that we will
2710 * only disable the radio toward the end of this routine,
2711 * if by then all wiphys are still idle.
2712 */
2713 spin_unlock_bh(&sc->wiphy_lock);
2714
2715 if (enable_radio) {
2716 ath_radio_enable(sc, hw);
2717 ath_print(common, ATH_DBG_CONFIG,
2718 "not-idle: enabling radio\n");
2719 }
2720 }
2721
2722 /*
2723 * We only prepare to enable PS here. We have to wait until our AP has
2724 * ACK'd our null data frame before disabling RX; otherwise we'll ignore
2725 * those ACKs and end up retransmitting the same null data frames.
2726 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
2727 */
2728 if (changed & IEEE80211_CONF_CHANGE_PS) {
2729 if (conf->flags & IEEE80211_CONF_PS) {
2730 sc->sc_flags |= SC_OP_PS_ENABLED;
2731 if (!(ah->caps.hw_caps &
2732 ATH9K_HW_CAP_AUTOSLEEP)) {
2733 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2734 sc->imask |= ATH9K_INT_TIM_TIMER;
2735 ath9k_hw_set_interrupts(sc->sc_ah,
2736 sc->imask);
2737 }
2738 }
2739 /*
2740 * At this point we know hardware has received an ACK
2741 * of a previously sent null data frame.
2742 */
2743 if (sc->sc_flags & SC_OP_NULLFUNC_COMPLETED) {
2744 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
2745 sc->ps_enabled = true;
2746 ath9k_hw_setrxabort(sc->sc_ah, 1);
2747 }
2748 } else {
2749 sc->ps_enabled = false;
2750 sc->sc_flags &= ~(SC_OP_PS_ENABLED |
2751 SC_OP_NULLFUNC_COMPLETED);
2752 ath9k_setpower(sc, ATH9K_PM_AWAKE);
2753 if (!(ah->caps.hw_caps &
2754 ATH9K_HW_CAP_AUTOSLEEP)) {
2755 ath9k_hw_setrxabort(sc->sc_ah, 0);
2756 sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
2757 SC_OP_WAIT_FOR_CAB |
2758 SC_OP_WAIT_FOR_PSPOLL_DATA |
2759 SC_OP_WAIT_FOR_TX_ACK);
2760 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2761 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2762 ath9k_hw_set_interrupts(sc->sc_ah,
2763 sc->imask);
2764 }
2765 }
2766 }
2767 }
2768
2769 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2770 struct ieee80211_channel *curchan = hw->conf.channel;
2771 int pos = curchan->hw_value;
2772
2773 aphy->chan_idx = pos;
2774 aphy->chan_is_ht = conf_is_ht(conf);
2775
2776 if (aphy->state == ATH_WIPHY_SCAN ||
2777 aphy->state == ATH_WIPHY_ACTIVE)
2778 ath9k_wiphy_pause_all_forced(sc, aphy);
2779 else {
2780 /*
2781 * Do not change the operational channel based on
2782 * changes to a paused wiphy.
2783 */
2784 goto skip_chan_change;
2785 }
2786
2787 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2788 curchan->center_freq);
2789
2790 /* XXX: remove me eventually */
2791 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
2792
2793 ath_update_chainmask(sc, conf_is_ht(conf));
2794
2795 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
2796 ath_print(common, ATH_DBG_FATAL,
2797 "Unable to set channel\n");
2798 mutex_unlock(&sc->mutex);
2799 return -EINVAL;
2800 }
2801 }
2802
2803 skip_chan_change:
2804 if (changed & IEEE80211_CONF_CHANGE_POWER)
2805 sc->config.txpowlimit = 2 * conf->power_level;
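/*
 * conf->power_level is in dBm while the driver tracks the tx power
 * limit in half-dBm steps, hence the factor of two.
 */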
2806
2807 spin_lock_bh(&sc->wiphy_lock);
2808 disable_radio = ath9k_all_wiphys_idle(sc);
2809 spin_unlock_bh(&sc->wiphy_lock);
2810
2811 if (disable_radio) {
2812 ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
2813 ath_radio_disable(sc, hw);
2814 }
2815
2816 mutex_unlock(&sc->mutex);
2817
2818 return 0;
2819 }
2820
2821 #define SUPPORTED_FILTERS \
2822 (FIF_PROMISC_IN_BSS | \
2823 FIF_ALLMULTI | \
2824 FIF_CONTROL | \
2825 FIF_PSPOLL | \
2826 FIF_OTHER_BSS | \
2827 FIF_BCN_PRBRESP_PROMISC | \
2828 FIF_FCSFAIL)
2829
2830 /* FIXME: sc->sc_full_reset ? */
2831 static void ath9k_configure_filter(struct ieee80211_hw *hw,
2832 unsigned int changed_flags,
2833 unsigned int *total_flags,
2834 u64 multicast)
2835 {
2836 struct ath_wiphy *aphy = hw->priv;
2837 struct ath_softc *sc = aphy->sc;
2838 u32 rfilt;
2839
2840 changed_flags &= SUPPORTED_FILTERS;
2841 *total_flags &= SUPPORTED_FILTERS;
2842
2843 sc->rx.rxfilter = *total_flags;
2844 ath9k_ps_wakeup(sc);
2845 rfilt = ath_calcrxfilter(sc);
2846 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2847 ath9k_ps_restore(sc);
2848
2849 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
2850 "Set HW RX filter: 0x%x\n", rfilt);
2851 }
2852
2853 static void ath9k_sta_notify(struct ieee80211_hw *hw,
2854 struct ieee80211_vif *vif,
2855 enum sta_notify_cmd cmd,
2856 struct ieee80211_sta *sta)
2857 {
2858 struct ath_wiphy *aphy = hw->priv;
2859 struct ath_softc *sc = aphy->sc;
2860
2861 switch (cmd) {
2862 case STA_NOTIFY_ADD:
2863 ath_node_attach(sc, sta);
2864 break;
2865 case STA_NOTIFY_REMOVE:
2866 ath_node_detach(sc, sta);
2867 break;
2868 default:
2869 break;
2870 }
2871 }
2872
2873 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2874 const struct ieee80211_tx_queue_params *params)
2875 {
2876 struct ath_wiphy *aphy = hw->priv;
2877 struct ath_softc *sc = aphy->sc;
2878 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2879 struct ath9k_tx_queue_info qi;
2880 int ret = 0, qnum;
2881
2882 if (queue >= WME_NUM_AC)
2883 return 0;
2884
2885 mutex_lock(&sc->mutex);
2886
2887 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
2888
2889 qi.tqi_aifs = params->aifs;
2890 qi.tqi_cwmin = params->cw_min;
2891 qi.tqi_cwmax = params->cw_max;
2892 qi.tqi_burstTime = params->txop;
2893 qnum = ath_get_hal_qnum(queue, sc);
2894
2895 ath_print(common, ATH_DBG_CONFIG,
2896 "Configure tx [queue/halq] [%d/%d], "
2897 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2898 queue, qnum, params->aifs, params->cw_min,
2899 params->cw_max, params->txop);
2900
2901 ret = ath_txq_update(sc, qnum, &qi);
2902 if (ret)
2903 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
2904
2905 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
2906 (qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret)
2907 ath_beaconq_config(sc);
2908
2909 mutex_unlock(&sc->mutex);
2910
2911 return ret;
2912 }
2913
2914 static int ath9k_set_key(struct ieee80211_hw *hw,
2915 enum set_key_cmd cmd,
2916 struct ieee80211_vif *vif,
2917 struct ieee80211_sta *sta,
2918 struct ieee80211_key_conf *key)
2919 {
2920 struct ath_wiphy *aphy = hw->priv;
2921 struct ath_softc *sc = aphy->sc;
2922 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2923 int ret = 0;
2924
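/*
 * Returning an error here (when hardware crypto is disabled via the
 * nohwcrypt module parameter) makes mac80211 fall back to software
 * encryption for the key.
 */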
2925 if (modparam_nohwcrypt)
2926 return -ENOSPC;
2927
2928 mutex_lock(&sc->mutex);
2929 ath9k_ps_wakeup(sc);
2930 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
2931
2932 switch (cmd) {
2933 case SET_KEY:
2934 ret = ath_key_config(common, vif, sta, key);
2935 if (ret >= 0) {
2936 key->hw_key_idx = ret;
2937 /* push IV and Michael MIC generation to stack */
2938 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2939 if (key->alg == ALG_TKIP)
2940 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2941 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2942 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2943 ret = 0;
2944 }
2945 break;
2946 case DISABLE_KEY:
2947 ath_key_delete(common, key);
2948 break;
2949 default:
2950 ret = -EINVAL;
2951 }
2952
2953 ath9k_ps_restore(sc);
2954 mutex_unlock(&sc->mutex);
2955
2956 return ret;
2957 }
2958
2959 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2960 struct ieee80211_vif *vif,
2961 struct ieee80211_bss_conf *bss_conf,
2962 u32 changed)
2963 {
2964 struct ath_wiphy *aphy = hw->priv;
2965 struct ath_softc *sc = aphy->sc;
2966 struct ath_hw *ah = sc->sc_ah;
2967 struct ath_common *common = ath9k_hw_common(ah);
2968 struct ath_vif *avp = (void *)vif->drv_priv;
2969 int error;
2970
2971 mutex_lock(&sc->mutex);
2972
2973 if (changed & BSS_CHANGED_BSSID) {
2974 /* Set BSSID */
2975 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
2976 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
2977 common->curaid = 0;
2978 ath9k_hw_write_associd(ah);
2979
2980 /* Set aggregation protection mode parameters */
2981 sc->config.ath_aggr_prot = 0;
2982
2983 /* Only legacy IBSS for now */
2984 if (vif->type == NL80211_IFTYPE_ADHOC)
2985 ath_update_chainmask(sc, 0);
2986
2987 ath_print(common, ATH_DBG_CONFIG,
2988 "BSSID: %pM aid: 0x%x\n",
2989 common->curbssid, common->curaid);
2990
2991 /* need to reconfigure the beacon */
2992 sc->sc_flags &= ~SC_OP_BEACONS;
2993 }
2994
2995 /* Enable transmission of beacons (AP, IBSS, MESH) */
2996 if ((changed & BSS_CHANGED_BEACON) ||
2997 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
2998 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2999 error = ath_beacon_alloc(aphy, vif);
3000 if (!error)
3001 ath_beacon_config(sc, vif);
3002 }
3003
3004 /* Disable transmission of beacons */
3005 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
3006 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
3007
3008 if (changed & BSS_CHANGED_BEACON_INT) {
3009 sc->beacon_interval = bss_conf->beacon_int;
3010 /*
3011 * In case of AP mode, the HW TSF has to be reset
3012 * when the beacon interval changes.
3013 */
3014 if (vif->type == NL80211_IFTYPE_AP) {
3015 sc->sc_flags |= SC_OP_TSF_RESET;
3016 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
3017 error = ath_beacon_alloc(aphy, vif);
3018 if (!error)
3019 ath_beacon_config(sc, vif);
3020 } else {
3021 ath_beacon_config(sc, vif);
3022 }
3023 }
3024
3025 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3026 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
3027 bss_conf->use_short_preamble);
3028 if (bss_conf->use_short_preamble)
3029 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
3030 else
3031 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
3032 }
3033
3034 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3035 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
3036 bss_conf->use_cts_prot);
3037 if (bss_conf->use_cts_prot &&
3038 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
3039 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
3040 else
3041 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
3042 }
3043
3044 if (changed & BSS_CHANGED_ASSOC) {
3045 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
3046 bss_conf->assoc);
3047 ath9k_bss_assoc_info(sc, vif, bss_conf);
3048 }
3049
3050 mutex_unlock(&sc->mutex);
3051 }
3052
3053 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
3054 {
3055 u64 tsf;
3056 struct ath_wiphy *aphy = hw->priv;
3057 struct ath_softc *sc = aphy->sc;
3058
3059 mutex_lock(&sc->mutex);
3060 tsf = ath9k_hw_gettsf64(sc->sc_ah);
3061 mutex_unlock(&sc->mutex);
3062
3063 return tsf;
3064 }
3065
3066 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3067 {
3068 struct ath_wiphy *aphy = hw->priv;
3069 struct ath_softc *sc = aphy->sc;
3070
3071 mutex_lock(&sc->mutex);
3072 ath9k_hw_settsf64(sc->sc_ah, tsf);
3073 mutex_unlock(&sc->mutex);
3074 }
3075
3076 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
3077 {
3078 struct ath_wiphy *aphy = hw->priv;
3079 struct ath_softc *sc = aphy->sc;
3080
3081 mutex_lock(&sc->mutex);
3082
3083 ath9k_ps_wakeup(sc);
3084 ath9k_hw_reset_tsf(sc->sc_ah);
3085 ath9k_ps_restore(sc);
3086
3087 mutex_unlock(&sc->mutex);
3088 }
3089
3090 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
3091 struct ieee80211_vif *vif,
3092 enum ieee80211_ampdu_mlme_action action,
3093 struct ieee80211_sta *sta,
3094 u16 tid, u16 *ssn)
3095 {
3096 struct ath_wiphy *aphy = hw->priv;
3097 struct ath_softc *sc = aphy->sc;
3098 int ret = 0;
3099
3100 switch (action) {
3101 case IEEE80211_AMPDU_RX_START:
3102 if (!(sc->sc_flags & SC_OP_RXAGGR))
3103 ret = -ENOTSUPP;
3104 break;
3105 case IEEE80211_AMPDU_RX_STOP:
3106 break;
3107 case IEEE80211_AMPDU_TX_START:
3108 ath9k_ps_wakeup(sc);
3109 ath_tx_aggr_start(sc, sta, tid, ssn);
3110 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3111 ath9k_ps_restore(sc);
3112 break;
3113 case IEEE80211_AMPDU_TX_STOP:
3114 ath9k_ps_wakeup(sc);
3115 ath_tx_aggr_stop(sc, sta, tid);
3116 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3117 ath9k_ps_restore(sc);
3118 break;
3119 case IEEE80211_AMPDU_TX_OPERATIONAL:
3120 ath9k_ps_wakeup(sc);
3121 ath_tx_aggr_resume(sc, sta, tid);
3122 ath9k_ps_restore(sc);
3123 break;
3124 default:
3125 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
3126 "Unknown AMPDU action\n");
3127 }
3128
3129 return ret;
3130 }
3131
3132 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3133 {
3134 struct ath_wiphy *aphy = hw->priv;
3135 struct ath_softc *sc = aphy->sc;
3136
3137 mutex_lock(&sc->mutex);
3138 if (ath9k_wiphy_scanning(sc)) {
3139 printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the "
3140 "same time\n");
3141 /*
3142 * Do not allow the concurrent scanning state for now. This
3143 * could be improved with scanning control moved into ath9k.
3144 */
3145 mutex_unlock(&sc->mutex);
3146 return;
3147 }
3148
3149 aphy->state = ATH_WIPHY_SCAN;
3150 ath9k_wiphy_pause_all_forced(sc, aphy);
3151
3152 spin_lock_bh(&sc->ani_lock);
3153 sc->sc_flags |= SC_OP_SCANNING;
3154 spin_unlock_bh(&sc->ani_lock);
3155 mutex_unlock(&sc->mutex);
3156 }
3157
3158 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
3159 {
3160 struct ath_wiphy *aphy = hw->priv;
3161 struct ath_softc *sc = aphy->sc;
3162
3163 mutex_lock(&sc->mutex);
3164 spin_lock_bh(&sc->ani_lock);
3165 aphy->state = ATH_WIPHY_ACTIVE;
3166 sc->sc_flags &= ~SC_OP_SCANNING;
3167 sc->sc_flags |= SC_OP_FULL_RESET;
3168 spin_unlock_bh(&sc->ani_lock);
3169 ath_beacon_config(sc, NULL);
3170 mutex_unlock(&sc->mutex);
3171 }
3172
3173 struct ieee80211_ops ath9k_ops = {
3174 .tx = ath9k_tx,
3175 .start = ath9k_start,
3176 .stop = ath9k_stop,
3177 .add_interface = ath9k_add_interface,
3178 .remove_interface = ath9k_remove_interface,
3179 .config = ath9k_config,
3180 .configure_filter = ath9k_configure_filter,
3181 .sta_notify = ath9k_sta_notify,
3182 .conf_tx = ath9k_conf_tx,
3183 .bss_info_changed = ath9k_bss_info_changed,
3184 .set_key = ath9k_set_key,
3185 .get_tsf = ath9k_get_tsf,
3186 .set_tsf = ath9k_set_tsf,
3187 .reset_tsf = ath9k_reset_tsf,
3188 .ampdu_action = ath9k_ampdu_action,
3189 .sw_scan_start = ath9k_sw_scan_start,
3190 .sw_scan_complete = ath9k_sw_scan_complete,
3191 .rfkill_poll = ath9k_rfkill_poll_state,
3192 };
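/*
 * A rough sketch of how this ops table is wired up (the details live
 * in the PCI/AHB bus glue; names here are only illustrative):
 *
 *	hw = ieee80211_alloc_hw(priv_size, &ath9k_ops);
 *	...
 *	error = ath_init_device(id->device, sc, subsysid, bus_ops);
 *
 * ath_init_device() then fills in the capabilities and calls
 * ieee80211_register_hw(), at which point mac80211 starts invoking
 * the callbacks above.
 */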
3193
3194 static int __init ath9k_init(void)
3195 {
3196 int error;
3197
3198 /* Register rate control algorithm */
3199 error = ath_rate_control_register();
3200 if (error != 0) {
3201 printk(KERN_ERR
3202 "ath9k: Unable to register rate control "
3203 "algorithm: %d\n",
3204 error);
3205 goto err_out;
3206 }
3207
3208 error = ath9k_debug_create_root();
3209 if (error) {
3210 printk(KERN_ERR
3211 "ath9k: Unable to create debugfs root: %d\n",
3212 error);
3213 goto err_rate_unregister;
3214 }
3215
3216 error = ath_pci_init();
3217 if (error < 0) {
3218 printk(KERN_ERR
3219 "ath9k: No PCI devices found, driver not installed.\n");
3220 error = -ENODEV;
3221 goto err_remove_root;
3222 }
3223
3224 error = ath_ahb_init();
3225 if (error < 0) {
3226 error = -ENODEV;
3227 goto err_pci_exit;
3228 }
3229
3230 return 0;
3231
3232 err_pci_exit:
3233 ath_pci_exit();
3234
3235 err_remove_root:
3236 ath9k_debug_remove_root();
3237 err_rate_unregister:
3238 ath_rate_control_unregister();
3239 err_out:
3240 return error;
3241 }
3242 module_init(ath9k_init);
3243
3244 static void __exit ath9k_exit(void)
3245 {
3246 ath_ahb_exit();
3247 ath_pci_exit();
3248 ath9k_debug_remove_root();
3249 ath_rate_control_unregister();
3250 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
3251 }
3252 module_exit(ath9k_exit);