/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/*
 * Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps. We only support the channels for which we
 * know we have calibration data on all cards, so this table
 * can stay static.
 */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

/*
 * Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
 * in 5 MHz steps. We only support the channels for which we
 * know we have calibration data on all cards, so this table
 * can stay static.
 */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Register reads and writes share the same lock. We do this to
 * serialize reads and writes on Atheros 802.11n PCI devices only.
 * This is required because the FIFO on these devices can sanely
 * accept only two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_ioread32,
        .write = ath9k_iowrite32,
};

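/*
 * Count the number of chains (set bits) in the given chainmask,
 * limited to max.
 */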
static int count_streams(unsigned int chainmask, int max)
{
        int streams = 0;

        do {
                if (++streams == max)
                        break;
        } while ((chainmask = chainmask & (chainmask - 1)));

        return streams;
}

/**************************/
/*     Initialization     */
/**************************/

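/*
 * Fill in the HT capabilities advertised to mac80211: channel width,
 * SM power save, SGI, STBC, A-MPDU parameters and the supported MCS
 * set derived from the configured TX/RX chainmasks.
 */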
static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_10_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = count_streams(common->tx_chainmask, max_streams);
        rx_streams = count_streams(common->rx_chainmask, max_streams);

        ath_print(common, ATH_DBG_CONFIG,
                  "TX streams %d, RX streams: %d\n",
                  tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

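/*
 * Regulatory notifier callback registered with cfg80211; forwards
 * regulatory requests to the shared ath regulatory code.
 */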
static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

        return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These hold the hardware descriptors used by
 * the driver.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                  name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                  name, ds, (u32) dd->dd_desc_len,
                  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

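/*
 * Set up the hardware key cache: clamp the number of usable entries
 * to ATH_KEYMAX, reset every entry, enable hardware TKIP MIC when
 * available, note whether split TKIP MIC keys are needed, and turn on
 * multicast key search if the hardware supports it.
 */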
static void ath9k_init_crypto(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        /* Get the hardware key cache size. */
        common->keymax = sc->sc_ah->caps.keycache_size;
        if (common->keymax > ATH_KEYMAX) {
                ath_print(common, ATH_DBG_ANY,
                          "Warning, using only %u entries in %u key cache\n",
                          ATH_KEYMAX, common->keymax);
                common->keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < common->keymax; i++)
                ath9k_hw_keyreset(sc->sc_ah, (u16) i);

        if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)) {
                /*
                 * Whether we should enable h/w TKIP MIC.
                 * XXX: if we don't support WME TKIP MIC, then we wouldn't
                 * report WMM capable, so it's always safe to turn on
                 * TKIP MIC in this case.
                 */
                ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
                                       0, 1, NULL);
        }

        /*
         * Check whether separate key cache entries are required to
         * handle both tx and rx TKIP MIC keys. With split MIC keys
         * the number of stations is limited to 27; otherwise it is 59.
         */
        if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)
            && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                      ATH9K_CIPHER_MIC, NULL)
            && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
                                      0, NULL))
                common->splitmic = 1;

        /* turn on mcast key search if possible */
        if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
                (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
                                             1, 1, NULL);
}

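/*
 * Initialize Bluetooth coexistence according to the hardware scheme:
 * nothing for ATH_BTCOEX_CFG_NONE, 2-wire or 3-wire setup otherwise.
 * The 3-wire case also sets up the btcoex timer and picks the BE data
 * queue for stomping.
 */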
static int ath9k_init_btcoex(struct ath_softc *sc)
{
        int r, qnum;

        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                break;
        case ATH_BTCOEX_CFG_2WIRE:
                ath9k_hw_btcoex_init_2wire(sc->sc_ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
                qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
                ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}

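/*
 * Set up the hardware transmit queues: beacon, CAB and one data queue
 * per WMM access category (BK, BE, VI, VO). On failure, tear down any
 * queues that were already configured.
 */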
static int ath9k_init_queues(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
                sc->tx.hwq_map[i] = -1;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        if (sc->beacon.beaconq == -1) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup a beacon xmit queue\n");
                goto err;
        }

        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->beacon.cabq == NULL) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup CAB xmit queue\n");
                goto err;
        }

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BK traffic\n");
                goto err;
        }

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BE traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VI traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VO traffic\n");
                goto err;
        }

        return 0;

err:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        return -EIO;
}

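/*
 * Attach the static channel and legacy bitrate tables to the bands the
 * hardware supports. The 5 GHz band skips the first four entries of
 * ath9k_legacy_rates (the CCK rates).
 */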
static void ath9k_init_channels_rates(struct ath_softc *sc)
{
        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
}

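/*
 * Miscellaneous defaults: ANI calibration timer, TX power limit,
 * aggregation flags, chainmasks, antenna diversity, BSSID mask,
 * slot time and beacon slots.
 */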
static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
        common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

        ath9k_hw_set_diversity(sc->sc_ah, true);
        sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
                sc->beacon.bslot[i] = NULL;
                sc->beacon.bslot_aphy[i] = NULL;
        }
}

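/*
 * Allocate the ath_hw structure, wire up register access and bus ops,
 * initialize locks and tasklets, and bring up the hardware and the
 * driver subsystems (debugfs, TX queues, btcoex, crypto, channels and
 * rates). Everything is unwound in reverse order on failure.
 */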
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;

        common = ath9k_hw_common(ah);
        common->ops = &ath9k_common_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;

        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        ret = ath9k_init_debug(ah);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to create debugfs files\n");
                goto err_debug;
        }

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ath9k_init_crypto(sc);
        ath9k_init_channels_rates(sc);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_exit_debug(ah);
err_debug:
        ath9k_hw_deinit(ah);
err_hw:
        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

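/*
 * Advertise the driver and hardware capabilities to mac80211: flags,
 * supported interface modes, queue counts, rate control algorithm,
 * supported bands, HT capabilities and the permanent MAC address.
 */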
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

        hw->rate_control_algorithm = "ath9k_rate_control";

        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
                if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

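/*
 * Top-level device bring-up: initialize the softc, advertise
 * capabilities, set up regulatory handling and the TX/RX DMA paths,
 * register with mac80211, then start LEDs and rfkill polling.
 */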
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

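/*
 * Reverse of ath9k_init_softc(): free the btcoex no-stomp timer when
 * 3-wire coex is in use, tear down the TX queues, debugfs and hardware
 * state, kill the tasklets and free the ath_hw structure.
 */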
static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if ((sc->btcoex.no_stomp_timer) &&
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_exit_debug(sc->sc_ah);
        ath9k_hw_deinit(sc->sc_ah);

        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

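/*
 * Full device teardown: stop rfkill polling and LEDs, unregister and
 * free any secondary wiphys, unregister the primary hw from mac80211
 * and release the RX/TX/softc state.
 */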
void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;
        int i = 0;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                sc->sec_wiphy[i] = NULL;
                ieee80211_unregister_hw(aphy->hw);
                ieee80211_free_hw(aphy->hw);
        }
        kfree(sc->sec_wiphy);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}

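/*
 * Free the coherent DMA descriptor memory and the buffer array that
 * ath_descdma_setup() allocated, and reset the descdma state.
 */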
void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

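/*
 * Module entry point: register the rate control algorithm, create the
 * debugfs root and probe the PCI and AHB buses, unwinding each step if
 * a later one fails.
 */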
static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                        "ath9k: Unable to register rate control "
                        "algorithm: %d\n",
                        error);
                goto err_out;
        }

        error = ath9k_debug_create_root();
        if (error) {
                printk(KERN_ERR
                        "ath9k: Unable to create debugfs root: %d\n",
                        error);
                goto err_rate_unregister;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                        "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_remove_root;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

 err_pci_exit:
        ath_pci_exit();

 err_remove_root:
        ath9k_debug_remove_root();
 err_rate_unregister:
        ath_rate_control_unregister();
 err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        ath_ahb_exit();
        ath_pci_exit();
        ath9k_debug_remove_root();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);