drivers/net/wireless/ath/ath9k/init.c (as of commit "ath9k: fix channel flag / regd issues with multiple cards")
1 /*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <linux/slab.h>
18
19 #include "ath9k.h"
20
21 static char *dev_info = "ath9k";
22
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
27
28 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
29 module_param_named(debug, ath9k_debug, uint, 0);
30 MODULE_PARM_DESC(debug, "Debugging mask");
31
32 int modparam_nohwcrypt;
33 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35
36 int led_blink;
37 module_param_named(blink, led_blink, int, 0444);
38 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39
40 /* We use the hw_value as an index into our private channel structure */
41
42 #define CHAN2G(_freq, _idx) { \
.band = IEEE80211_BAND_2GHZ, \
43 .center_freq = (_freq), \
44 .hw_value = (_idx), \
45 .max_power = 20, \
46 }
47
48 #define CHAN5G(_freq, _idx) { \
49 .band = IEEE80211_BAND_5GHZ, \
50 .center_freq = (_freq), \
51 .hw_value = (_idx), \
52 .max_power = 20, \
53 }
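/*
 * For illustration only: CHAN2G(2412, 0) expands to an ieee80211_channel
 * initializer whose effective contents are roughly
 *   { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
 *     .hw_value = 0, .max_power = 20 },
 * i.e. hw_value is simply the row index the driver uses to look up its
 * private per-channel state, as noted above.
 */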
54
55 /* Some 2 GHz radios are actually tunable from 2312 to 2732 MHz
56 * in 5 MHz steps. To keep this table static, we only list the
57 * channels for which we know all cards have calibration data.
58 */
59 static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
60 CHAN2G(2412, 0), /* Channel 1 */
61 CHAN2G(2417, 1), /* Channel 2 */
62 CHAN2G(2422, 2), /* Channel 3 */
63 CHAN2G(2427, 3), /* Channel 4 */
64 CHAN2G(2432, 4), /* Channel 5 */
65 CHAN2G(2437, 5), /* Channel 6 */
66 CHAN2G(2442, 6), /* Channel 7 */
67 CHAN2G(2447, 7), /* Channel 8 */
68 CHAN2G(2452, 8), /* Channel 9 */
69 CHAN2G(2457, 9), /* Channel 10 */
70 CHAN2G(2462, 10), /* Channel 11 */
71 CHAN2G(2467, 11), /* Channel 12 */
72 CHAN2G(2472, 12), /* Channel 13 */
73 CHAN2G(2484, 13), /* Channel 14 */
74 };
75
76 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
77 * in 5 MHz steps. To keep this table static, we only list the
78 * channels for which we know all cards have calibration data.
79 */
80 static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
81 /* _We_ call this UNII 1 */
82 CHAN5G(5180, 14), /* Channel 36 */
83 CHAN5G(5200, 15), /* Channel 40 */
84 CHAN5G(5220, 16), /* Channel 44 */
85 CHAN5G(5240, 17), /* Channel 48 */
86 /* _We_ call this UNII 2 */
87 CHAN5G(5260, 18), /* Channel 52 */
88 CHAN5G(5280, 19), /* Channel 56 */
89 CHAN5G(5300, 20), /* Channel 60 */
90 CHAN5G(5320, 21), /* Channel 64 */
91 /* _We_ call this "Middle band" */
92 CHAN5G(5500, 22), /* Channel 100 */
93 CHAN5G(5520, 23), /* Channel 104 */
94 CHAN5G(5540, 24), /* Channel 108 */
95 CHAN5G(5560, 25), /* Channel 112 */
96 CHAN5G(5580, 26), /* Channel 116 */
97 CHAN5G(5600, 27), /* Channel 120 */
98 CHAN5G(5620, 28), /* Channel 124 */
99 CHAN5G(5640, 29), /* Channel 128 */
100 CHAN5G(5660, 30), /* Channel 132 */
101 CHAN5G(5680, 31), /* Channel 136 */
102 CHAN5G(5700, 32), /* Channel 140 */
103 /* _We_ call this UNII 3 */
104 CHAN5G(5745, 33), /* Channel 149 */
105 CHAN5G(5765, 34), /* Channel 153 */
106 CHAN5G(5785, 35), /* Channel 157 */
107 CHAN5G(5805, 36), /* Channel 161 */
108 CHAN5G(5825, 37), /* Channel 165 */
109 };
110
111 /* Atheros hardware rate code addition for short preamble */
112 #define SHPCHECK(__hw_rate, __flags) \
113 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
114
115 #define RATE(_bitrate, _hw_rate, _flags) { \
116 .bitrate = (_bitrate), \
117 .flags = (_flags), \
118 .hw_value = (_hw_rate), \
119 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
120 }
121
122 static struct ieee80211_rate ath9k_legacy_rates[] = {
123 RATE(10, 0x1b, 0),
124 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
125 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
126 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
127 RATE(60, 0x0b, 0),
128 RATE(90, 0x0f, 0),
129 RATE(120, 0x0a, 0),
130 RATE(180, 0x0e, 0),
131 RATE(240, 0x09, 0),
132 RATE(360, 0x0d, 0),
133 RATE(480, 0x08, 0),
134 RATE(540, 0x0c, 0),
135 };
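/*
 * A brief note on the table above: SHPCHECK() sets bit 0x04 of the hardware
 * rate code to select the short-preamble variant, so only the CCK rates
 * flagged IEEE80211_RATE_SHORT_PREAMBLE get a non-zero hw_value_short. The
 * first four entries are the 11b CCK rates; the 5 GHz band registration
 * below reuses this same table starting at offset 4 (ath9k_legacy_rates + 4)
 * so that only the OFDM rates are advertised there.
 */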
136
137 static void ath9k_deinit_softc(struct ath_softc *sc);
138
139 /*
140 * Reads and writes share the same lock. We do this to serialize
141 * register access on Atheros 802.11n PCI devices only, since the
142 * FIFO on these devices can only sanely accept two requests at a time.
143 */
144
145 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
146 {
147 struct ath_hw *ah = (struct ath_hw *) hw_priv;
148 struct ath_common *common = ath9k_hw_common(ah);
149 struct ath_softc *sc = (struct ath_softc *) common->priv;
150
151 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
152 unsigned long flags;
153 spin_lock_irqsave(&sc->sc_serial_rw, flags);
154 iowrite32(val, sc->mem + reg_offset);
155 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
156 } else
157 iowrite32(val, sc->mem + reg_offset);
158 }
159
160 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
161 {
162 struct ath_hw *ah = (struct ath_hw *) hw_priv;
163 struct ath_common *common = ath9k_hw_common(ah);
164 struct ath_softc *sc = (struct ath_softc *) common->priv;
165 u32 val;
166
167 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
168 unsigned long flags;
169 spin_lock_irqsave(&sc->sc_serial_rw, flags);
170 val = ioread32(sc->mem + reg_offset);
171 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
172 } else
173 val = ioread32(sc->mem + reg_offset);
174 return val;
175 }
176
177 static const struct ath_ops ath9k_common_ops = {
178 .read = ath9k_ioread32,
179 .write = ath9k_iowrite32,
180 };
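/*
 * These ops are installed as common->ops in ath9k_init_softc() below, so
 * the shared ath code issues its register accesses through them; the
 * spinlock path is taken only when ah->config.serialize_regmode is
 * SER_REG_MODE_ON, as explained above.
 */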
181
182 /**************************/
183 /* Initialization */
184 /**************************/
185
186 static void setup_ht_cap(struct ath_softc *sc,
187 struct ieee80211_sta_ht_cap *ht_info)
188 {
189 struct ath_hw *ah = sc->sc_ah;
190 struct ath_common *common = ath9k_hw_common(ah);
191 u8 tx_streams, rx_streams;
192 int i, max_streams;
193
194 ht_info->ht_supported = true;
195 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
196 IEEE80211_HT_CAP_SM_PS |
197 IEEE80211_HT_CAP_SGI_40 |
198 IEEE80211_HT_CAP_DSSSCCK40;
199
200 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
201 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
202
203 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
204 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
205
206 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
207 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
208
209 if (AR_SREV_9300_20_OR_LATER(ah))
210 max_streams = 3;
211 else
212 max_streams = 2;
213
214 if (AR_SREV_9280_20_OR_LATER(ah)) {
215 if (max_streams >= 2)
216 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
217 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
218 }
219
220 /* set up supported mcs set */
221 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
222 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
223 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
224
225 ath_print(common, ATH_DBG_CONFIG,
226 "TX streams %d, RX streams: %d\n",
227 tx_streams, rx_streams);
228
229 if (tx_streams != rx_streams) {
230 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
231 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
232 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
233 }
234
235 for (i = 0; i < rx_streams; i++)
236 ht_info->mcs.rx_mask[i] = 0xff;
237
238 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
239 }
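/*
 * Note on the MCS mask set up above: rx_mask[] holds one byte per group of
 * eight MCS indexes, so writing 0xff into each of the first rx_streams
 * bytes advertises MCS 0 through (8 * rx_streams - 1), e.g. MCS 0-7 for
 * one stream, 0-15 for two and 0-23 for three.
 */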
240
241 static int ath9k_reg_notifier(struct wiphy *wiphy,
242 struct regulatory_request *request)
243 {
244 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
245 struct ath_wiphy *aphy = hw->priv;
246 struct ath_softc *sc = aphy->sc;
247 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
248
249 return ath_reg_notifier_apply(wiphy, request, reg);
250 }
251
252 /*
253 * This function allocates both the DMA descriptor memory and the
254 * ath_buf entries that point into it; together they hold the
255 * descriptors used by the system.
256 */
257 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
258 struct list_head *head, const char *name,
259 int nbuf, int ndesc, bool is_tx)
260 {
261 #define DS2PHYS(_dd, _ds) \
262 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
263 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
264 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
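/*
 * Rough arithmetic behind ATH_DESC_4KB_BOUND_CHECK: a descriptor fetch can
 * be up to 32 dwords (128 bytes, 0x80), so a descriptor whose offset within
 * a 4 KB page is above 0xF7F (i.e. in the last 0x80 bytes of the page) may
 * cross the page boundary and has to be skipped; see the loop further down.
 */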
265 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
266 u8 *ds;
267 struct ath_buf *bf;
268 int i, bsize, error, desc_len;
269
270 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
271 name, nbuf, ndesc);
272
273 INIT_LIST_HEAD(head);
274
275 if (is_tx)
276 desc_len = sc->sc_ah->caps.tx_desc_len;
277 else
278 desc_len = sizeof(struct ath_desc);
279
280 /* ath_desc must be a multiple of DWORDs */
281 if ((desc_len % 4) != 0) {
282 ath_print(common, ATH_DBG_FATAL,
283 "ath_desc not DWORD aligned\n");
284 BUG_ON((desc_len % 4) != 0);
285 error = -ENOMEM;
286 goto fail;
287 }
288
289 dd->dd_desc_len = desc_len * nbuf * ndesc;
290
291 /*
292 * Need additional DMA memory because we can't use
293 * descriptors that cross the 4K page boundary. Assume
294 * one skipped descriptor per 4K page.
295 */
296 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
297 u32 ndesc_skipped =
298 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
299 u32 dma_len;
300
301 while (ndesc_skipped) {
302 dma_len = ndesc_skipped * desc_len;
303 dd->dd_desc_len += dma_len;
304
305 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
306 }
307 }
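/*
 * The loop above is, in effect, a small fixed-point computation: each pass
 * adds room for one skipped descriptor per 4 KB page of the length added so
 * far, then re-checks whether that padding itself spilled into further
 * pages, stopping once no additional pages are introduced.
 */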
308
309 /* allocate descriptors */
310 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
311 &dd->dd_desc_paddr, GFP_KERNEL);
312 if (dd->dd_desc == NULL) {
313 error = -ENOMEM;
314 goto fail;
315 }
316 ds = (u8 *) dd->dd_desc;
317 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
318 name, ds, (u32) dd->dd_desc_len,
319 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
320
321 /* allocate buffers */
322 bsize = sizeof(struct ath_buf) * nbuf;
323 bf = kzalloc(bsize, GFP_KERNEL);
324 if (bf == NULL) {
325 error = -ENOMEM;
326 goto fail2;
327 }
328 dd->dd_bufptr = bf;
329
330 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
331 bf->bf_desc = ds;
332 bf->bf_daddr = DS2PHYS(dd, ds);
333
334 if (!(sc->sc_ah->caps.hw_caps &
335 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
336 /*
337 * Skip descriptor addresses which can cause 4KB
338 * boundary crossing (addr + length) with a 32 dword
339 * descriptor fetch.
340 */
341 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
342 BUG_ON((caddr_t) bf->bf_desc >=
343 ((caddr_t) dd->dd_desc +
344 dd->dd_desc_len));
345
346 ds += (desc_len * ndesc);
347 bf->bf_desc = ds;
348 bf->bf_daddr = DS2PHYS(dd, ds);
349 }
350 }
351 list_add_tail(&bf->list, head);
352 }
353 return 0;
354 fail2:
355 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
356 dd->dd_desc_paddr);
357 fail:
358 memset(dd, 0, sizeof(*dd));
359 return error;
360 #undef ATH_DESC_4KB_BOUND_CHECK
361 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
362 #undef DS2PHYS
363 }
364
365 static void ath9k_init_crypto(struct ath_softc *sc)
366 {
367 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
368 int i = 0;
369
370 /* Get the hardware key cache size. */
371 common->keymax = sc->sc_ah->caps.keycache_size;
372 if (common->keymax > ATH_KEYMAX) {
373 ath_print(common, ATH_DBG_ANY,
374 "Warning, using only %u entries in %u key cache\n",
375 ATH_KEYMAX, common->keymax);
376 common->keymax = ATH_KEYMAX;
377 }
378
379 /*
380 * Reset the key cache since some parts do not
381 * reset the contents on initial power up.
382 */
383 for (i = 0; i < common->keymax; i++)
384 ath_hw_keyreset(common, (u16) i);
385
386 /*
387 * Check whether separate key cache entries are required
388 * to handle both TX and RX MIC keys. With split MIC keys
389 * the number of supported stations is limited to 27;
390 * otherwise it is 59.
391 */
392 if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
393 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
394 }
395
396 static int ath9k_init_btcoex(struct ath_softc *sc)
397 {
398 int r, qnum;
399
400 switch (sc->sc_ah->btcoex_hw.scheme) {
401 case ATH_BTCOEX_CFG_NONE:
402 break;
403 case ATH_BTCOEX_CFG_2WIRE:
404 ath9k_hw_btcoex_init_2wire(sc->sc_ah);
405 break;
406 case ATH_BTCOEX_CFG_3WIRE:
407 ath9k_hw_btcoex_init_3wire(sc->sc_ah);
408 r = ath_init_btcoex_timer(sc);
409 if (r)
410 return -1;
411 qnum = sc->tx.hwq_map[WME_AC_BE];
412 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
413 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
414 break;
415 default:
416 WARN_ON(1);
417 break;
418 }
419
420 return 0;
421 }
422
423 static int ath9k_init_queues(struct ath_softc *sc)
424 {
425 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
426 int i = 0;
427
428 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
429 sc->tx.hwq_map[i] = -1;
430
431 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
432 if (sc->beacon.beaconq == -1) {
433 ath_print(common, ATH_DBG_FATAL,
434 "Unable to setup a beacon xmit queue\n");
435 goto err;
436 }
437
438 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
439 if (sc->beacon.cabq == NULL) {
440 ath_print(common, ATH_DBG_FATAL,
441 "Unable to setup CAB xmit queue\n");
442 goto err;
443 }
444
445 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
446 ath_cabq_update(sc);
447
448 if (!ath_tx_setup(sc, WME_AC_BK)) {
449 ath_print(common, ATH_DBG_FATAL,
450 "Unable to setup xmit queue for BK traffic\n");
451 goto err;
452 }
453
454 if (!ath_tx_setup(sc, WME_AC_BE)) {
455 ath_print(common, ATH_DBG_FATAL,
456 "Unable to setup xmit queue for BE traffic\n");
457 goto err;
458 }
459 if (!ath_tx_setup(sc, WME_AC_VI)) {
460 ath_print(common, ATH_DBG_FATAL,
461 "Unable to setup xmit queue for VI traffic\n");
462 goto err;
463 }
464 if (!ath_tx_setup(sc, WME_AC_VO)) {
465 ath_print(common, ATH_DBG_FATAL,
466 "Unable to setup xmit queue for VO traffic\n");
467 goto err;
468 }
469
470 return 0;
471
472 err:
473 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
474 if (ATH_TXQ_SETUP(sc, i))
475 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
476
477 return -EIO;
478 }
479
480 static int ath9k_init_channels_rates(struct ath_softc *sc)
481 {
482 void *channels;
483
484 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
485 channels = kmemdup(ath9k_2ghz_chantable,
486 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
487 if (!channels)
488 return -ENOMEM;
489
490 sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
491 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
492 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
493 ARRAY_SIZE(ath9k_2ghz_chantable);
494 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
495 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
496 ARRAY_SIZE(ath9k_legacy_rates);
497 }
498
499 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
500 channels = kmemdup(ath9k_5ghz_chantable,
501 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
502 if (!channels) {
503 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
504 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
505 return -ENOMEM;
506 }
507
508 sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
509 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
510 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
511 ARRAY_SIZE(ath9k_5ghz_chantable);
512 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
513 ath9k_legacy_rates + 4;
514 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
515 ARRAY_SIZE(ath9k_legacy_rates) - 4;
516 }
517 return 0;
518 }
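/*
 * Note: the channel tables are kmemdup()'d here rather than pointed to
 * directly, so each device gets its own writable copy and regulatory code
 * can adjust per-wiphy channel flags without affecting other ath9k cards
 * (the "channel flag / regd issues with multiple cards" this revision of
 * the file addresses).
 */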
519
520 static void ath9k_init_misc(struct ath_softc *sc)
521 {
522 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
523 int i = 0;
524
525 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
526
527 sc->config.txpowlimit = ATH_TXPOWER_MAX;
528
529 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
530 sc->sc_flags |= SC_OP_TXAGGR;
531 sc->sc_flags |= SC_OP_RXAGGR;
532 }
533
534 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
535 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
536
537 ath9k_hw_set_diversity(sc->sc_ah, true);
538 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
539
540 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
541
542 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
543
544 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
545 sc->beacon.bslot[i] = NULL;
546 sc->beacon.bslot_aphy[i] = NULL;
547 }
548
549 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
550 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
551 }
552
553 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
554 const struct ath_bus_ops *bus_ops)
555 {
556 struct ath_hw *ah = NULL;
557 struct ath_common *common;
558 int ret = 0, i;
559 int csz = 0;
560
561 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
562 if (!ah)
563 return -ENOMEM;
564
565 ah->hw_version.devid = devid;
566 ah->hw_version.subsysid = subsysid;
567 sc->sc_ah = ah;
568
569 common = ath9k_hw_common(ah);
570 common->ops = &ath9k_common_ops;
571 common->bus_ops = bus_ops;
572 common->ah = ah;
573 common->hw = sc->hw;
574 common->priv = sc;
575 common->debug_mask = ath9k_debug;
576
577 spin_lock_init(&sc->wiphy_lock);
578 spin_lock_init(&sc->sc_resetlock);
579 spin_lock_init(&sc->sc_serial_rw);
580 spin_lock_init(&sc->sc_pm_lock);
581 mutex_init(&sc->mutex);
582 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
583 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
584 (unsigned long)sc);
585
586 /*
587 * Cache line size is used to size and align various
588 * structures used to communicate with the hardware.
589 */
590 ath_read_cachesize(common, &csz);
591 common->cachelsz = csz << 2; /* convert to bytes */
592
593 /* Initializes the hardware for all supported chipsets */
594 ret = ath9k_hw_init(ah);
595 if (ret)
596 goto err_hw;
597
598 ret = ath9k_init_debug(ah);
599 if (ret) {
600 ath_print(common, ATH_DBG_FATAL,
601 "Unable to create debugfs files\n");
602 goto err_debug;
603 }
604
605 ret = ath9k_init_queues(sc);
606 if (ret)
607 goto err_queues;
608
609 ret = ath9k_init_btcoex(sc);
610 if (ret)
611 goto err_btcoex;
612
613 ret = ath9k_init_channels_rates(sc);
614 if (ret)
615 goto err_btcoex;
616
617 ath9k_init_crypto(sc);
618 ath9k_init_misc(sc);
619
620 return 0;
621
622 err_btcoex:
623 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
624 if (ATH_TXQ_SETUP(sc, i))
625 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
626 err_queues:
627 ath9k_exit_debug(ah);
628 err_debug:
629 ath9k_hw_deinit(ah);
630 err_hw:
631 tasklet_kill(&sc->intr_tq);
632 tasklet_kill(&sc->bcon_tasklet);
633
634 kfree(ah);
635 sc->sc_ah = NULL;
636
637 return ret;
638 }
639
640 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
641 {
642 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
643
644 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
645 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
646 IEEE80211_HW_SIGNAL_DBM |
647 IEEE80211_HW_SUPPORTS_PS |
648 IEEE80211_HW_PS_NULLFUNC_STACK |
649 IEEE80211_HW_SPECTRUM_MGMT |
650 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
651
652 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
653 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
654
655 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
656 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
657
658 hw->wiphy->interface_modes =
659 BIT(NL80211_IFTYPE_AP) |
660 BIT(NL80211_IFTYPE_STATION) |
661 BIT(NL80211_IFTYPE_ADHOC) |
662 BIT(NL80211_IFTYPE_MESH_POINT);
663
664 if (AR_SREV_5416(sc->sc_ah))
665 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
666
667 hw->queues = 4;
668 hw->max_rates = 4;
669 hw->channel_change_time = 5000;
670 hw->max_listen_interval = 10;
671 hw->max_rate_tries = 10;
672 hw->sta_data_size = sizeof(struct ath_node);
673 hw->vif_data_size = sizeof(struct ath_vif);
674
675 #ifdef CONFIG_ATH9K_RATE_CONTROL
676 hw->rate_control_algorithm = "ath9k_rate_control";
677 #endif
678
679 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
680 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
681 &sc->sbands[IEEE80211_BAND_2GHZ];
682 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
683 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
684 &sc->sbands[IEEE80211_BAND_5GHZ];
685
686 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
687 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
688 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
689 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
690 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
691 }
692
693 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
694 }
695
696 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
697 const struct ath_bus_ops *bus_ops)
698 {
699 struct ieee80211_hw *hw = sc->hw;
700 struct ath_common *common;
701 struct ath_hw *ah;
702 int error = 0;
703 struct ath_regulatory *reg;
704
705 /* Bring up device */
706 error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
707 if (error != 0)
708 goto error_init;
709
710 ah = sc->sc_ah;
711 common = ath9k_hw_common(ah);
712 ath9k_set_hw_capab(sc, hw);
713
714 /* Initialize regulatory */
715 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
716 ath9k_reg_notifier);
717 if (error)
718 goto error_regd;
719
720 reg = &common->regulatory;
721
722 /* Setup TX DMA */
723 error = ath_tx_init(sc, ATH_TXBUF);
724 if (error != 0)
725 goto error_tx;
726
727 /* Setup RX DMA */
728 error = ath_rx_init(sc, ATH_RXBUF);
729 if (error != 0)
730 goto error_rx;
731
732 /* Register with mac80211 */
733 error = ieee80211_register_hw(hw);
734 if (error)
735 goto error_register;
736
737 /* Handle world regulatory */
738 if (!ath_is_world_regd(reg)) {
739 error = regulatory_hint(hw->wiphy, reg->alpha2);
740 if (error)
741 goto error_world;
742 }
743
744 INIT_WORK(&sc->hw_check_work, ath_hw_check);
745 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
746 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
747 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
748 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
749
750 ath_init_leds(sc);
751 ath_start_rfkill_poll(sc);
752
753 return 0;
754
755 error_world:
756 ieee80211_unregister_hw(hw);
757 error_register:
758 ath_rx_cleanup(sc);
759 error_rx:
760 ath_tx_cleanup(sc);
761 error_tx:
762 /* Nothing */
763 error_regd:
764 ath9k_deinit_softc(sc);
765 error_init:
766 return error;
767 }
768
769 /*****************************/
770 /* De-Initialization */
771 /*****************************/
772
773 static void ath9k_deinit_softc(struct ath_softc *sc)
774 {
775 int i = 0;
776
777 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
778 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
779
780 if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
781 kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
782
783 if ((sc->btcoex.no_stomp_timer) &&
784 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
785 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
786
787 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
788 if (ATH_TXQ_SETUP(sc, i))
789 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
790
791 ath9k_exit_debug(sc->sc_ah);
792 ath9k_hw_deinit(sc->sc_ah);
793
794 tasklet_kill(&sc->intr_tq);
795 tasklet_kill(&sc->bcon_tasklet);
796
797 kfree(sc->sc_ah);
798 sc->sc_ah = NULL;
799 }
800
801 void ath9k_deinit_device(struct ath_softc *sc)
802 {
803 struct ieee80211_hw *hw = sc->hw;
804 int i = 0;
805
806 ath9k_ps_wakeup(sc);
807
808 wiphy_rfkill_stop_polling(sc->hw->wiphy);
809 ath_deinit_leds(sc);
810
811 for (i = 0; i < sc->num_sec_wiphy; i++) {
812 struct ath_wiphy *aphy = sc->sec_wiphy[i];
813 if (aphy == NULL)
814 continue;
815 sc->sec_wiphy[i] = NULL;
816 ieee80211_unregister_hw(aphy->hw);
817 ieee80211_free_hw(aphy->hw);
818 }
819
820 ieee80211_unregister_hw(hw);
821 ath_rx_cleanup(sc);
822 ath_tx_cleanup(sc);
823 ath9k_deinit_softc(sc);
824 kfree(sc->sec_wiphy);
825 }
826
827 void ath_descdma_cleanup(struct ath_softc *sc,
828 struct ath_descdma *dd,
829 struct list_head *head)
830 {
831 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
832 dd->dd_desc_paddr);
833
834 INIT_LIST_HEAD(head);
835 kfree(dd->dd_bufptr);
836 memset(dd, 0, sizeof(*dd));
837 }
838
839 /************************/
840 /* Module Hooks */
841 /************************/
842
843 static int __init ath9k_init(void)
844 {
845 int error;
846
847 /* Register rate control algorithm */
848 error = ath_rate_control_register();
849 if (error != 0) {
850 printk(KERN_ERR
851 "ath9k: Unable to register rate control "
852 "algorithm: %d\n",
853 error);
854 goto err_out;
855 }
856
857 error = ath9k_debug_create_root();
858 if (error) {
859 printk(KERN_ERR
860 "ath9k: Unable to create debugfs root: %d\n",
861 error);
862 goto err_rate_unregister;
863 }
864
865 error = ath_pci_init();
866 if (error < 0) {
867 printk(KERN_ERR
868 "ath9k: No PCI devices found, driver not installed.\n");
869 error = -ENODEV;
870 goto err_remove_root;
871 }
872
873 error = ath_ahb_init();
874 if (error < 0) {
875 error = -ENODEV;
876 goto err_pci_exit;
877 }
878
879 return 0;
880
881 err_pci_exit:
882 ath_pci_exit();
883
884 err_remove_root:
885 ath9k_debug_remove_root();
886 err_rate_unregister:
887 ath_rate_control_unregister();
888 err_out:
889 return error;
890 }
891 module_init(ath9k_init);
892
893 static void __exit ath9k_exit(void)
894 {
895 ath_ahb_exit();
896 ath_pci_exit();
897 ath9k_debug_remove_root();
898 ath_rate_control_unregister();
899 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
900 }
901 module_exit(ath9k_exit);