/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");
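
/*
 * All three parameters can be given at module load time, e.g.:
 *
 *     modprobe ath9k nohwcrypt=1 blink=1
 *
 * "debug" takes a bitmask of ATH_DBG_* flags.
 */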

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}
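
/*
 * Note that CHAN2G() leaves .band unset: IEEE80211_BAND_2GHZ is 0, so the
 * implicit zero-initialization of the structure already selects the 2 GHz
 * band, while CHAN5G() below must set .band explicitly.
 */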

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz in 5 MHz
 * steps; to keep this table static, we only list the channels for which
 * we know all cards have calibration data. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz in 5 MHz
 * steps; to keep this table static, we only list the channels for which
 * we know all cards have calibration data. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize register
 * access on Atheros 802.11n PCI devices only. This is required because the
 * FIFO on these devices can only safely accept two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_ioread32,
        .write = ath9k_iowrite32,
};
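
/*
 * These accessors are hooked up as common->ops in ath9k_init_softc(), so
 * register I/O issued through the shared ath layer goes through the
 * (optionally serializing) wrappers above.
 */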

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
                ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_20_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
        rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

        ath_print(common, ATH_DBG_CONFIG,
                  "TX streams %d, RX streams: %d\n",
                  tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

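        /*
         * Each byte of mcs.rx_mask covers eight MCS indices, so setting
         * rx_streams bytes to 0xff advertises support for MCS 0 through
         * (8 * rx_streams - 1).
         */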
        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

        return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor memory and the ath_buf
 * structures that reference it; together they hold the descriptors used
 * by the hardware and the driver.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
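/*
 * DS2PHYS() converts a descriptor's CPU (virtual) address into its DMA bus
 * address by adding its offset within the coherent block to dd_desc_paddr.
 * ATH_DESC_4KB_BOUND_CHECK() is true when a descriptor starts in the last
 * 128 bytes of a 4 KB page, i.e. a 32-dword descriptor fetch from there
 * would cross the page boundary. ATH_DESC_4KB_BOUND_NUM_SKIPPED() gives
 * the worst-case number of such descriptors that must be skipped for a
 * buffer of the given length.
 */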
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                  name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

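                /*
                 * The extra memory reserved for skipped descriptors may
                 * itself span additional 4 KB pages, so iterate until no
                 * further descriptors need to be skipped.
                 */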
                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                  name, ds, (u32) dd->dd_desc_len,
                  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

static void ath9k_init_crypto(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        /* Get the hardware key cache size. */
        common->keymax = sc->sc_ah->caps.keycache_size;
        if (common->keymax > ATH_KEYMAX) {
                ath_print(common, ATH_DBG_ANY,
                          "Warning, using only %u entries in %u key cache\n",
                          ATH_KEYMAX, common->keymax);
                common->keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < common->keymax; i++)
                ath_hw_keyreset(common, (u16) i);

        /*
         * Check whether separate key cache entries are required to
         * handle both tx and rx MIC keys. With split MIC keys the number
         * of stations is limited to 27; otherwise it is 59.
         */
        if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
                common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
        int r, qnum;

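        /*
         * Set up Bluetooth coexistence according to the scheme the hardware
         * supports: none, a 2-wire interface, or a 3-wire interface. The
         * 3-wire case additionally needs a coex timer, registers the BE
         * hardware queue with the coex hardware, and defaults the stomp
         * type to ATH_BTCOEX_STOMP_LOW.
         */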
        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                break;
        case ATH_BTCOEX_CFG_2WIRE:
                ath9k_hw_btcoex_init_2wire(sc->sc_ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
                qnum = sc->tx.hwq_map[WME_AC_BE];
                ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
                sc->tx.hwq_map[i] = -1;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        if (sc->beacon.beaconq == -1) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup a beacon xmit queue\n");
                goto err;
        }

        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->beacon.cabq == NULL) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup CAB xmit queue\n");
                goto err;
        }

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

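        /*
         * Map each WME access category (background, best effort, video,
         * voice) onto its own hardware transmit queue; failing any of
         * these leaves the device unusable, so tear down whatever was
         * set up and bail out.
         */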
        if (!ath_tx_setup(sc, WME_AC_BK)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BK traffic\n");
                goto err;
        }

        if (!ath_tx_setup(sc, WME_AC_BE)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BE traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, WME_AC_VI)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VI traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, WME_AC_VO)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VO traffic\n");
                goto err;
        }

        return 0;

err:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        return -EIO;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
        void *channels;

        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
                channels = kmemdup(ath9k_2ghz_chantable,
                                   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
                if (!channels)
                        return -ENOMEM;

                sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
                channels = kmemdup(ath9k_5ghz_chantable,
                                   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
                if (!channels) {
                        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
                        return -ENOMEM;
                }

                sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
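                /*
                 * The first four entries of ath9k_legacy_rates are the
                 * 2.4 GHz-only CCK rates (1, 2, 5.5 and 11 Mbps); the 5 GHz
                 * band advertises only the eight OFDM rates that follow.
                 */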
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
        common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

        ath9k_hw_set_diversity(sc->sc_ah, true);
        sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
                sc->beacon.bslot[i] = NULL;
                sc->beacon.bslot_aphy[i] = NULL;
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;

        common = ath9k_hw_common(ah);
        common->ops = &ath9k_common_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;

        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        ret = ath9k_init_debug(ah);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to create debugfs files\n");
                goto err_debug;
        }

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ret = ath9k_init_channels_rates(sc);
        if (ret)
                goto err_btcoex;

        ath9k_init_crypto(sc);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_exit_debug(ah);
err_debug:
        ath9k_hw_deinit(ah);
err_hw:
        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        if (AR_SREV_5416(sc->sc_ah))
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

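        /* One hardware transmit queue per WME access category is exposed
         * to mac80211. */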
        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
        hw->rate_control_algorithm = "ath9k_rate_control";
#endif

        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
                if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

        if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

        if ((sc->btcoex.no_stomp_timer) &&
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_exit_debug(sc->sc_ah);
        ath9k_hw_deinit(sc->sc_ah);

        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;
        int i = 0;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                sc->sec_wiphy[i] = NULL;
                ieee80211_unregister_hw(aphy->hw);
                ieee80211_free_hw(aphy->hw);
        }

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
        kfree(sc->sec_wiphy);
}

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                        "ath9k: Unable to register rate control "
                        "algorithm: %d\n",
                        error);
                goto err_out;
        }

        error = ath9k_debug_create_root();
        if (error) {
                printk(KERN_ERR
                        "ath9k: Unable to create debugfs root: %d\n",
                        error);
                goto err_rate_unregister;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                        "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_remove_root;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

err_pci_exit:
        ath_pci_exit();

err_remove_root:
        ath9k_debug_remove_root();
err_rate_unregister:
        ath_rate_control_unregister();
err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        ath_ahb_exit();
        ath_pci_exit();
        ath9k_debug_remove_root();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);