drivers/net/wireless/ath/ath9k/init.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

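/*
 * As an illustration, CHAN2G(2412, 0) expands to a 2.4 GHz
 * ieee80211_channel with center_freq 2412 MHz, hw_value 0 and a
 * 20 dBm max_power cap; the hw_value doubles as the index into
 * ah->channels[] (see ath9k_init_band_txpower() below).
 */
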
/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps; we only support the channels we know we have
 * calibration data for on all cards, which keeps this table
 * static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * in 5 MHz steps; we only support the channels we know we have
 * calibration data for on all cards, which keeps this table
 * static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}
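
/*
 * For example, RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE) yields
 * hw_value 0x1a and hw_value_short 0x1e (0x1a | 0x04), while rates
 * without the short-preamble flag keep hw_value_short at 0.
 */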

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is
 * required as the FIFO on these devices can only sanely accept
 * two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

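/*
 * MMIO accessors handed to the shared ath layer; the hardware code's
 * register reads and writes are funneled through these hooks so the
 * serialization above can be applied when needed.
 */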
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These hold the descriptors used by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
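	/*
	 * Note on ATH_DESC_4KB_BOUND_CHECK: 0xF7F leaves 0x80 (128) bytes of
	 * headroom before the end of a 4 KB page, i.e. one 32 dword
	 * descriptor fetch. DMA addresses whose page offset lies above that
	 * threshold are skipped further down so a descriptor fetch never
	 * straddles the page boundary.
	 */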
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
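	/*
	 * The loop below converges quickly: e.g. 4096 bytes of descriptors
	 * add one extra descriptor's worth of memory, and that extra length
	 * is itself well under 4096 bytes, so no further skips are added.
	 */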
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/*
	 * Check whether separate key cache entries are required to
	 * handle both TX and RX MIC keys. With split MIC keys the
	 * number of stations is limited to 27, otherwise 59.
	 */
	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++)
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);

	return 0;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
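		/* Skip the four 11b (CCK) entries; only OFDM rates are valid on 5 GHz */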
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	if (!sc->dev->platform_data)
		ah->ah_flags |= AH_USE_EEPROM;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
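	/* csz is reported in 32-bit words (PCI cache line size units) */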
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
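		/* max_power_level is reported in half dBm units */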
		chan->max_power = reg->max_power_level / 2;
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_wiphy *aphy = hw->priv;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}

	ieee80211_unregister_hw(hw);
	pm_qos_remove_request(&sc->pm_qos_req);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
	kfree(sc->sec_wiphy);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();

err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);