drivers/net/wireless/ath/ath9k/init.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps; to keep this table static we only list the
 * channels for which we know all cards have calibration data. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * in 5 MHz steps; to keep this table static we only list the
 * channels for which we know all cards have calibration data. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

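/*
 * Legacy (non-HT) rate table. The first four entries are the 11b/CCK
 * rates and are only advertised on the 2.4 GHz band; the 5 GHz band
 * registers the OFDM subset starting at index 4 (see
 * ath9k_init_channels_rates()).
 */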
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is
 * required because the FIFO on these devices can sanely accept only
 * two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};

/**************************/
/*     Initialization     */
/**************************/

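/*
 * Advertise the HT capabilities supported by this chip: channel width,
 * short GI, STBC and the MCS set, sized by the number of TX/RX streams
 * derived from the configured chainmasks.
 */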
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

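	/*
	 * Each 0xff byte in rx_mask enables MCS 0-7 for one spatial
	 * stream, so rx_streams bytes advertise MCS 0 through
	 * (rx_streams * 8 - 1).
	 */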
	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These hold the descriptors used by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
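/*
 * DS2PHYS translates a descriptor's virtual address into its DMA address
 * by adding its offset within the coherent block to dd_desc_paddr.
 * ATH_DESC_4KB_BOUND_CHECK flags DMA addresses that fall in the last
 * 128 bytes of a 4 KB page, where a 32-dword descriptor fetch could
 * cross the page boundary.
 */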
#define DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_dbg(common, ATH_DBG_ANY,
			"Warning, using only %u entries in %u key cache\n",
			ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/*
	 * Check whether separate key cache entries are required to
	 * handle both tx and rx MIC keys. With split MIC keys the
	 * number of stations is limited to 27; otherwise it is 59.
	 */
	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

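/*
 * Configure Bluetooth coexistence according to the scheme reported by
 * the hardware: nothing for ATH_BTCOEX_CFG_NONE, a simple 2-wire setup,
 * or the full 3-wire setup, which additionally starts the coexistence
 * timer, registers the best-effort TX queue with the hardware and
 * defaults the stomp type to ATH_BTCOEX_STOMP_LOW.
 */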
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

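/*
 * Set up the beacon queue, the CAB (content-after-beacon) queue and one
 * data queue per WMM access category.
 */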
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++)
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);

	return 0;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	if (!sc->dev->platform_data)
		ah->ah_flags |= AH_USE_EEPROM;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

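/*
 * Walk every channel in the band, apply the regulatory TX power limit
 * and record the resulting maximum power. max_power_level is presumably
 * kept in half-dBm steps, hence the division by two to get dBm.
 */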
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
		chan->max_power = reg->max_power_level / 2;
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

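	/* One hardware TX queue per WMM access category (see ath9k_init_queues()). */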
	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_wiphy *aphy = hw->priv;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}

	ieee80211_unregister_hw(hw);
	pm_qos_remove_request(&sc->pm_qos_req);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
	kfree(sc->sec_wiphy);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();

err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);