drivers/net/wireless/ath/ath9k/init.c

/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

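/*
 * The parameters above are set at module load time, e.g.
 * "modprobe ath9k nohwcrypt=1"; the 0444 permissions expose them
 * read-only under /sys/module/ath9k/parameters/.
 */
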
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/*
 * Some 2 GHz radios are actually tunable from 2312 to 2732 MHz in
 * 5 MHz steps. To keep this table static we only list the channels
 * for which we know all cards have calibration data.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/*
 * Some 5 GHz radios are actually tunable from XXXX to YYYY MHz in
 * 5 MHz steps. To keep this table static we only list the channels
 * for which we know all cards have calibration data.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

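/*
 * Legacy (non-HT) rate table shared by both bands. Bitrates are in
 * units of 100 kbps; the first four entries are the 2.4 GHz-only CCK
 * rates, which ath9k_init_channels_rates() skips for the 5 GHz band.
 */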
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * register reads and writes on Atheros 802.11n PCI devices only.
 * This is required because the FIFO on these devices can only
 * sanely accept two requests at a time.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

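/*
 * Unlocked read-modify-write helper; ath9k_reg_rmw() below takes
 * sc_serial_rw around it when register access must be serialized.
 */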
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

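/*
 * Fill in the HT capabilities advertised to mac80211, based on the
 * chip revision and the number of TX/RX streams it supports.
 */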
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

	ath_dbg(common, ATH_DBG_CONFIG,
		"TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor memory and the
 * ath_buf structures that reference it; the resulting buffers are
 * linked onto the caller's list.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}

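/*
 * Configure bluetooth coexistence according to the scheme reported by
 * the hardware: nothing for ATH_BTCOEX_CFG_NONE, GPIO init for the
 * 2-wire scheme, and for 3-wire additionally a coex timer and a tie-in
 * to the best-effort TX queue.
 */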
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int r;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
		INIT_LIST_HEAD(&sc->btcoex.mci.info);
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

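/*
 * Create the hardware queues used by the driver: the beacon queue,
 * the CAB (content-after-beacon) queue and one data queue per WME
 * access category.
 */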
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}
	return 0;
}

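/*
 * Copy the static channel tables into per-device memory and attach
 * the legacy rate table to each supported band. The 5 GHz band skips
 * the first four entries of ath9k_legacy_rates (the CCK rates).
 */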
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

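/*
 * Miscellaneous one-time state: the ANI calibration timer, the TX
 * power limit, aggregation flags, the default antenna, the BSSID
 * mask, beacon slots and antenna diversity defaults.
 */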
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

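/*
 * Allocate the ath_hw instance, wire up the register ops, locks and
 * tasklets, then initialize the hardware, the TX queues, bluetooth
 * coexistence and the channel/rate tables.
 */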
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	spin_lock_init(&sc->debug.samp_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

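/*
 * Walk every channel in the given band and compute/apply its TX power
 * limit; ath9k_init_txpower_limits() restores the original channel
 * afterwards.
 */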
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}

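/*
 * Advertise the device's capabilities to mac80211: hardware flags,
 * supported interface modes, queue and rate limits, antenna masks
 * and the channel bands set up earlier.
 */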
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	ath9k_reload_chainmask_settings(sc);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

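/*
 * Top-level device bring-up: initialize the softc, regulatory,
 * TX/RX DMA and the LED trigger, then register with mac80211 and
 * debugfs.
 */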
int ath9k_init_device(u16 devid, struct ath_softc *sc,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

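/*
 * Release the descriptor DMA memory and the ath_buf array allocated
 * by ath_descdma_setup(), and reset the descdma state.
 */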
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();

err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);