2 * Copyright (c) 2008-2009 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/slab.h>
/* Driver name used in kernel log output (see ath9k_exit). */
static char *dev_info = "ath9k";
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
28 static unsigned int ath9k_debug
= ATH_DBG_DEFAULT
;
29 module_param_named(debug
, ath9k_debug
, uint
, 0);
30 MODULE_PARM_DESC(debug
, "Debugging mask");
32 int modparam_nohwcrypt
;
33 module_param_named(nohwcrypt
, modparam_nohwcrypt
, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt
, "Disable hardware encryption");
37 module_param_named(blink
, led_blink
, int, 0444);
38 MODULE_PARM_DESC(blink
, "Enable LED blink on activity");
40 static int ath9k_btcoex_enable
;
41 module_param_named(btcoex_enable
, ath9k_btcoex_enable
, int, 0444);
42 MODULE_PARM_DESC(btcoex_enable
, "Enable wifi-BT coexistence");
44 /* We use the hw_value as an index into our private channel structure */
/*
 * Channel-table entry builder macros for the 2 GHz and 5 GHz tables below.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior macro fields are missing (CHAN2G lacks at least
 * .band/.hw_value/.max_power, CHAN5G its tail) — restore before compiling.
 */
46 #define CHAN2G(_freq, _idx) { \
47 .center_freq = (_freq), \
52 #define CHAN5G(_freq, _idx) { \
53 .band = IEEE80211_BAND_5GHZ, \
54 .center_freq = (_freq), \
59 /* Some 2 GHz radios are actually tunable on 2312-2732
60 * on 5 MHz steps, we support the channels which we know
61 * we have calibration data for all cards though to make
63 static const struct ieee80211_channel ath9k_2ghz_chantable
[] = {
64 CHAN2G(2412, 0), /* Channel 1 */
65 CHAN2G(2417, 1), /* Channel 2 */
66 CHAN2G(2422, 2), /* Channel 3 */
67 CHAN2G(2427, 3), /* Channel 4 */
68 CHAN2G(2432, 4), /* Channel 5 */
69 CHAN2G(2437, 5), /* Channel 6 */
70 CHAN2G(2442, 6), /* Channel 7 */
71 CHAN2G(2447, 7), /* Channel 8 */
72 CHAN2G(2452, 8), /* Channel 9 */
73 CHAN2G(2457, 9), /* Channel 10 */
74 CHAN2G(2462, 10), /* Channel 11 */
75 CHAN2G(2467, 11), /* Channel 12 */
76 CHAN2G(2472, 12), /* Channel 13 */
77 CHAN2G(2484, 13), /* Channel 14 */
80 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
81 * on 5 MHz steps, we support the channels which we know
82 * we have calibration data for all cards though to make
84 static const struct ieee80211_channel ath9k_5ghz_chantable
[] = {
85 /* _We_ call this UNII 1 */
86 CHAN5G(5180, 14), /* Channel 36 */
87 CHAN5G(5200, 15), /* Channel 40 */
88 CHAN5G(5220, 16), /* Channel 44 */
89 CHAN5G(5240, 17), /* Channel 48 */
90 /* _We_ call this UNII 2 */
91 CHAN5G(5260, 18), /* Channel 52 */
92 CHAN5G(5280, 19), /* Channel 56 */
93 CHAN5G(5300, 20), /* Channel 60 */
94 CHAN5G(5320, 21), /* Channel 64 */
95 /* _We_ call this "Middle band" */
96 CHAN5G(5500, 22), /* Channel 100 */
97 CHAN5G(5520, 23), /* Channel 104 */
98 CHAN5G(5540, 24), /* Channel 108 */
99 CHAN5G(5560, 25), /* Channel 112 */
100 CHAN5G(5580, 26), /* Channel 116 */
101 CHAN5G(5600, 27), /* Channel 120 */
102 CHAN5G(5620, 28), /* Channel 124 */
103 CHAN5G(5640, 29), /* Channel 128 */
104 CHAN5G(5660, 30), /* Channel 132 */
105 CHAN5G(5680, 31), /* Channel 136 */
106 CHAN5G(5700, 32), /* Channel 140 */
107 /* _We_ call this UNII 3 */
108 CHAN5G(5745, 33), /* Channel 149 */
109 CHAN5G(5765, 34), /* Channel 153 */
110 CHAN5G(5785, 35), /* Channel 157 */
111 CHAN5G(5805, 36), /* Channel 161 */
112 CHAN5G(5825, 37), /* Channel 165 */
115 /* Atheros hardware rate code addition for short preamble */
/*
 * Build the short-preamble hardware rate code: OR 0x04 into the hw rate
 * when the rate advertises short-preamble support, otherwise 0.
 */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
/*
 * Legacy (non-HT) bitrate table (units of 100 kbps) and its builder macro.
 * NOTE(review): extraction artifact — stray original line numbers fused in;
 * the RATE macro is missing its .flags field and closing brace, and the
 * table is missing its OFDM entries and terminator; restore from upstream.
 */
119 #define RATE(_bitrate, _hw_rate, _flags) { \
120 .bitrate = (_bitrate), \
122 .hw_value = (_hw_rate), \
123 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
126 static struct ieee80211_rate ath9k_legacy_rates
[] = {
128 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE
),
129 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE
),
130 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE
),
/* Forward declaration: teardown helper defined in the deinit section below. */
static void ath9k_deinit_softc(struct ath_softc *sc);
144 * Read and write, they both share the same lock. We do this to serialize
145 * reads and writes on Atheros 802.11n PCI devices only. This is required
146 * as the FIFO on these devices can only accept sanely 2 requests.
149 static void ath9k_iowrite32(void *hw_priv
, u32 val
, u32 reg_offset
)
151 struct ath_hw
*ah
= (struct ath_hw
*) hw_priv
;
152 struct ath_common
*common
= ath9k_hw_common(ah
);
153 struct ath_softc
*sc
= (struct ath_softc
*) common
->priv
;
155 if (ah
->config
.serialize_regmode
== SER_REG_MODE_ON
) {
157 spin_lock_irqsave(&sc
->sc_serial_rw
, flags
);
158 iowrite32(val
, sc
->mem
+ reg_offset
);
159 spin_unlock_irqrestore(&sc
->sc_serial_rw
, flags
);
161 iowrite32(val
, sc
->mem
+ reg_offset
);
164 static unsigned int ath9k_ioread32(void *hw_priv
, u32 reg_offset
)
166 struct ath_hw
*ah
= (struct ath_hw
*) hw_priv
;
167 struct ath_common
*common
= ath9k_hw_common(ah
);
168 struct ath_softc
*sc
= (struct ath_softc
*) common
->priv
;
171 if (ah
->config
.serialize_regmode
== SER_REG_MODE_ON
) {
173 spin_lock_irqsave(&sc
->sc_serial_rw
, flags
);
174 val
= ioread32(sc
->mem
+ reg_offset
);
175 spin_unlock_irqrestore(&sc
->sc_serial_rw
, flags
);
177 val
= ioread32(sc
->mem
+ reg_offset
);
181 static const struct ath_ops ath9k_common_ops
= {
182 .read
= ath9k_ioread32
,
183 .write
= ath9k_iowrite32
,
186 /**************************/
188 /**************************/
/*
 * setup_ht_cap() - advertise the chip's HT (802.11n) capabilities to
 * mac80211 for one band: capability flags, A-MPDU parameters and the
 * supported MCS set derived from the tx/rx chainmasks.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and several interior lines are missing (opening brace, the
 * declarations of 'i'/'max_streams', and the max_streams assignments that
 * follow the AR_SREV_9300_20_OR_LATER() check); restore from upstream
 * before compiling.
 */
190 static void setup_ht_cap(struct ath_softc
*sc
,
191 struct ieee80211_sta_ht_cap
*ht_info
)
193 struct ath_hw
*ah
= sc
->sc_ah
;
194 struct ath_common
*common
= ath9k_hw_common(ah
);
195 u8 tx_streams
, rx_streams
;
198 ht_info
->ht_supported
= true;
199 ht_info
->cap
= IEEE80211_HT_CAP_SUP_WIDTH_20_40
|
200 IEEE80211_HT_CAP_SM_PS
|
201 IEEE80211_HT_CAP_SGI_40
|
202 IEEE80211_HT_CAP_DSSSCCK40
;
204 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_LDPC
)
205 ht_info
->cap
|= IEEE80211_HT_CAP_LDPC_CODING
;
207 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_SGI_20
)
208 ht_info
->cap
|= IEEE80211_HT_CAP_SGI_20
;
210 ht_info
->ampdu_factor
= IEEE80211_HT_MAX_AMPDU_64K
;
211 ht_info
->ampdu_density
= IEEE80211_HT_MPDU_DENSITY_8
;
213 if (AR_SREV_9300_20_OR_LATER(ah
))
218 if (AR_SREV_9280_20_OR_LATER(ah
)) {
219 if (max_streams
>= 2)
220 ht_info
->cap
|= IEEE80211_HT_CAP_TX_STBC
;
221 ht_info
->cap
|= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT
);
224 /* set up supported mcs set */
225 memset(&ht_info
->mcs
, 0, sizeof(ht_info
->mcs
));
226 tx_streams
= ath9k_cmn_count_streams(common
->tx_chainmask
, max_streams
);
227 rx_streams
= ath9k_cmn_count_streams(common
->rx_chainmask
, max_streams
);
229 ath_print(common
, ATH_DBG_CONFIG
,
230 "TX streams %d, RX streams: %d\n",
231 tx_streams
, rx_streams
);
233 if (tx_streams
!= rx_streams
) {
234 ht_info
->mcs
.tx_params
|= IEEE80211_HT_MCS_TX_RX_DIFF
;
235 ht_info
->mcs
.tx_params
|= ((tx_streams
- 1) <<
236 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT
);
239 for (i
= 0; i
< rx_streams
; i
++)
240 ht_info
->mcs
.rx_mask
[i
] = 0xff;
242 ht_info
->mcs
.tx_params
|= IEEE80211_HT_MCS_TX_DEFINED
;
245 static int ath9k_reg_notifier(struct wiphy
*wiphy
,
246 struct regulatory_request
*request
)
248 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
249 struct ath_wiphy
*aphy
= hw
->priv
;
250 struct ath_softc
*sc
= aphy
->sc
;
251 struct ath_regulatory
*reg
= ath9k_hw_regulatory(sc
->sc_ah
);
253 return ath_reg_notifier_apply(wiphy
, request
, reg
);
257 * This function will allocate both the DMA descriptor structure, and the
258 * buffers it contains. These are used to contain the descriptors used
/*
 * ath_descdma_setup() - allocate a DMA descriptor ring plus the ath_buf
 * array that tracks it, padding the allocation so no descriptor crosses a
 * 4 KB boundary on hardware without ATH9K_HW_CAP_4KB_SPLITTRANS, then
 * linking every ath_buf onto 'head'.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and many interior lines are missing (local declarations of
 * ds/bf/ndesc_skipped/dma_len, NULL checks and error-return paths, the
 * closing braces and return statements); restore from upstream before
 * compiling.
 */
261 int ath_descdma_setup(struct ath_softc
*sc
, struct ath_descdma
*dd
,
262 struct list_head
*head
, const char *name
,
263 int nbuf
, int ndesc
, bool is_tx
)
265 #define DS2PHYS(_dd, _ds) \
266 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
267 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
268 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
269 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
272 int i
, bsize
, error
, desc_len
;
274 ath_print(common
, ATH_DBG_CONFIG
, "%s DMA: %u buffers %u desc/buf\n",
277 INIT_LIST_HEAD(head
);
280 desc_len
= sc
->sc_ah
->caps
.tx_desc_len
;
282 desc_len
= sizeof(struct ath_desc
);
284 /* ath_desc must be a multiple of DWORDs */
285 if ((desc_len
% 4) != 0) {
286 ath_print(common
, ATH_DBG_FATAL
,
287 "ath_desc not DWORD aligned\n");
288 BUG_ON((desc_len
% 4) != 0);
293 dd
->dd_desc_len
= desc_len
* nbuf
* ndesc
;
296 * Need additional DMA memory because we can't use
297 * descriptors that cross the 4K page boundary. Assume
298 * one skipped descriptor per 4K page.
300 if (!(sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_4KB_SPLITTRANS
)) {
302 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd
->dd_desc_len
);
305 while (ndesc_skipped
) {
306 dma_len
= ndesc_skipped
* desc_len
;
307 dd
->dd_desc_len
+= dma_len
;
309 ndesc_skipped
= ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len
);
313 /* allocate descriptors */
314 dd
->dd_desc
= dma_alloc_coherent(sc
->dev
, dd
->dd_desc_len
,
315 &dd
->dd_desc_paddr
, GFP_KERNEL
);
316 if (dd
->dd_desc
== NULL
) {
320 ds
= (u8
*) dd
->dd_desc
;
321 ath_print(common
, ATH_DBG_CONFIG
, "%s DMA map: %p (%u) -> %llx (%u)\n",
322 name
, ds
, (u32
) dd
->dd_desc_len
,
323 ito64(dd
->dd_desc_paddr
), /*XXX*/(u32
) dd
->dd_desc_len
);
325 /* allocate buffers */
326 bsize
= sizeof(struct ath_buf
) * nbuf
;
327 bf
= kzalloc(bsize
, GFP_KERNEL
);
334 for (i
= 0; i
< nbuf
; i
++, bf
++, ds
+= (desc_len
* ndesc
)) {
336 bf
->bf_daddr
= DS2PHYS(dd
, ds
);
338 if (!(sc
->sc_ah
->caps
.hw_caps
&
339 ATH9K_HW_CAP_4KB_SPLITTRANS
)) {
341 * Skip descriptor addresses which can cause 4KB
342 * boundary crossing (addr + length) with a 32 dword
345 while (ATH_DESC_4KB_BOUND_CHECK(bf
->bf_daddr
)) {
346 BUG_ON((caddr_t
) bf
->bf_desc
>=
347 ((caddr_t
) dd
->dd_desc
+
350 ds
+= (desc_len
* ndesc
);
352 bf
->bf_daddr
= DS2PHYS(dd
, ds
);
355 list_add_tail(&bf
->list
, head
);
359 dma_free_coherent(sc
->dev
, dd
->dd_desc_len
, dd
->dd_desc
,
362 memset(dd
, 0, sizeof(*dd
));
364 #undef ATH_DESC_4KB_BOUND_CHECK
365 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
369 static void ath9k_init_crypto(struct ath_softc
*sc
)
371 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
374 /* Get the hardware key cache size. */
375 common
->keymax
= sc
->sc_ah
->caps
.keycache_size
;
376 if (common
->keymax
> ATH_KEYMAX
) {
377 ath_print(common
, ATH_DBG_ANY
,
378 "Warning, using only %u entries in %u key cache\n",
379 ATH_KEYMAX
, common
->keymax
);
380 common
->keymax
= ATH_KEYMAX
;
384 * Reset the key cache since some parts do not
385 * reset the contents on initial power up.
387 for (i
= 0; i
< common
->keymax
; i
++)
388 ath_hw_keyreset(common
, (u16
) i
);
391 * Check whether the separate key cache entries
392 * are required to handle both tx+rx MIC keys.
393 * With split mic keys the number of stations is limited
394 * to 27 otherwise 59.
396 if (sc
->sc_ah
->misc_mode
& AR_PCU_MIC_NEW_LOC_ENA
)
397 common
->crypt_caps
|= ATH_CRYPT_CAP_MIC_COMBINED
;
/*
 * ath9k_init_btcoex() - set up Bluetooth coexistence per the hardware's
 * btcoex scheme: nothing for NONE, 2-wire init, or 3-wire init plus a
 * coex timer and stomp-low configuration on the best-effort TX queue.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, local
 * declarations of 'r'/'txq', case break/return statements and the closing
 * brace/return); restore from upstream before compiling.
 */
400 static int ath9k_init_btcoex(struct ath_softc
*sc
)
405 switch (sc
->sc_ah
->btcoex_hw
.scheme
) {
406 case ATH_BTCOEX_CFG_NONE
:
408 case ATH_BTCOEX_CFG_2WIRE
:
409 ath9k_hw_btcoex_init_2wire(sc
->sc_ah
);
411 case ATH_BTCOEX_CFG_3WIRE
:
412 ath9k_hw_btcoex_init_3wire(sc
->sc_ah
);
413 r
= ath_init_btcoex_timer(sc
);
416 txq
= sc
->tx
.txq_map
[WME_AC_BE
];
417 ath9k_hw_init_btcoex_hw(sc
->sc_ah
, txq
->axq_qnum
);
418 sc
->btcoex
.bt_stomp_type
= ATH_BTCOEX_STOMP_LOW
;
/*
 * ath9k_init_queues() - set up the beacon queue, the CAB (content-after-
 * beacon) queue, and one data TX queue per WME access category.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, 'int i;',
 * the error checks on the queue-setup results and the return paths);
 * restore from upstream before compiling.
 */
428 static int ath9k_init_queues(struct ath_softc
*sc
)
432 sc
->beacon
.beaconq
= ath9k_hw_beaconq_setup(sc
->sc_ah
);
433 sc
->beacon
.cabq
= ath_txq_setup(sc
, ATH9K_TX_QUEUE_CAB
, 0);
435 sc
->config
.cabqReadytime
= ATH_CABQ_READY_TIME
;
438 for (i
= 0; i
< WME_NUM_AC
; i
++)
439 sc
->tx
.txq_map
[i
] = ath_txq_setup(sc
, ATH9K_TX_QUEUE_DATA
, i
);
/*
 * ath9k_init_channels_rates() - duplicate the static channel tables into
 * per-device sband structures and attach the legacy bitrate table (the
 * 5 GHz band skips the first 4 entries, i.e. the CCK rates).
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, the
 * 'channels' declaration, the kmemdup NULL checks / -ENOMEM returns, and
 * the closing brace/return 0); restore from upstream before compiling.
 */
444 static int ath9k_init_channels_rates(struct ath_softc
*sc
)
448 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable
) +
449 ARRAY_SIZE(ath9k_5ghz_chantable
) !=
452 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
) {
453 channels
= kmemdup(ath9k_2ghz_chantable
,
454 sizeof(ath9k_2ghz_chantable
), GFP_KERNEL
);
458 sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
= channels
;
459 sc
->sbands
[IEEE80211_BAND_2GHZ
].band
= IEEE80211_BAND_2GHZ
;
460 sc
->sbands
[IEEE80211_BAND_2GHZ
].n_channels
=
461 ARRAY_SIZE(ath9k_2ghz_chantable
);
462 sc
->sbands
[IEEE80211_BAND_2GHZ
].bitrates
= ath9k_legacy_rates
;
463 sc
->sbands
[IEEE80211_BAND_2GHZ
].n_bitrates
=
464 ARRAY_SIZE(ath9k_legacy_rates
);
467 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
) {
468 channels
= kmemdup(ath9k_5ghz_chantable
,
469 sizeof(ath9k_5ghz_chantable
), GFP_KERNEL
);
471 if (sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
)
472 kfree(sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
);
476 sc
->sbands
[IEEE80211_BAND_5GHZ
].channels
= channels
;
477 sc
->sbands
[IEEE80211_BAND_5GHZ
].band
= IEEE80211_BAND_5GHZ
;
478 sc
->sbands
[IEEE80211_BAND_5GHZ
].n_channels
=
479 ARRAY_SIZE(ath9k_5ghz_chantable
);
480 sc
->sbands
[IEEE80211_BAND_5GHZ
].bitrates
=
481 ath9k_legacy_rates
+ 4;
482 sc
->sbands
[IEEE80211_BAND_5GHZ
].n_bitrates
=
483 ARRAY_SIZE(ath9k_legacy_rates
) - 4;
488 static void ath9k_init_misc(struct ath_softc
*sc
)
490 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
493 setup_timer(&common
->ani
.timer
, ath_ani_calibrate
, (unsigned long)sc
);
495 sc
->config
.txpowlimit
= ATH_TXPOWER_MAX
;
497 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_HT
) {
498 sc
->sc_flags
|= SC_OP_TXAGGR
;
499 sc
->sc_flags
|= SC_OP_RXAGGR
;
502 common
->tx_chainmask
= sc
->sc_ah
->caps
.tx_chainmask
;
503 common
->rx_chainmask
= sc
->sc_ah
->caps
.rx_chainmask
;
505 ath9k_hw_set_diversity(sc
->sc_ah
, true);
506 sc
->rx
.defant
= ath9k_hw_getdefantenna(sc
->sc_ah
);
508 memcpy(common
->bssidmask
, ath_bcast_mac
, ETH_ALEN
);
510 sc
->beacon
.slottime
= ATH9K_SLOT_TIME_9
;
512 for (i
= 0; i
< ARRAY_SIZE(sc
->beacon
.bslot
); i
++) {
513 sc
->beacon
.bslot
[i
] = NULL
;
514 sc
->beacon
.bslot_aphy
[i
] = NULL
;
517 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_ANT_DIV_COMB
)
518 sc
->ant_comb
.count
= ATH_ANT_DIV_COMB_INIT_COUNT
;
/*
 * ath9k_init_softc() - core softc bring-up: allocate and seed the ath_hw,
 * wire up the common ops/bus ops, module-parameter-driven settings, locks
 * and tasklets, then initialize the hardware, queues, btcoex, channel/rate
 * tables and crypto. The trailing cleanup statements are the error-unwind
 * path.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, 'ret'/'i'/
 * 'csz' declarations, allocation NULL check, the tasklet_init continuation
 * argument, 'goto err_*' checks after each init call, the error labels and
 * the return statements); restore from upstream before compiling.
 */
521 static int ath9k_init_softc(u16 devid
, struct ath_softc
*sc
, u16 subsysid
,
522 const struct ath_bus_ops
*bus_ops
)
524 struct ath_hw
*ah
= NULL
;
525 struct ath_common
*common
;
529 ah
= kzalloc(sizeof(struct ath_hw
), GFP_KERNEL
);
533 ah
->hw_version
.devid
= devid
;
534 ah
->hw_version
.subsysid
= subsysid
;
537 if (!sc
->dev
->platform_data
)
538 ah
->ah_flags
|= AH_USE_EEPROM
;
540 common
= ath9k_hw_common(ah
);
541 common
->ops
= &ath9k_common_ops
;
542 common
->bus_ops
= bus_ops
;
546 common
->debug_mask
= ath9k_debug
;
547 common
->btcoex_enabled
= ath9k_btcoex_enable
== 1;
548 spin_lock_init(&common
->cc_lock
);
550 spin_lock_init(&sc
->wiphy_lock
);
551 spin_lock_init(&sc
->sc_serial_rw
);
552 spin_lock_init(&sc
->sc_pm_lock
);
553 mutex_init(&sc
->mutex
);
554 tasklet_init(&sc
->intr_tq
, ath9k_tasklet
, (unsigned long)sc
);
555 tasklet_init(&sc
->bcon_tasklet
, ath_beacon_tasklet
,
559 * Cache line size is used to size and align various
560 * structures used to communicate with the hardware.
562 ath_read_cachesize(common
, &csz
);
563 common
->cachelsz
= csz
<< 2; /* convert to bytes */
565 /* Initializes the hardware for all supported chipsets */
566 ret
= ath9k_hw_init(ah
);
570 ret
= ath9k_init_queues(sc
);
574 ret
= ath9k_init_btcoex(sc
);
578 ret
= ath9k_init_channels_rates(sc
);
582 ath9k_init_crypto(sc
);
588 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++)
589 if (ATH_TXQ_SETUP(sc
, i
))
590 ath_tx_cleanupq(sc
, &sc
->tx
.txq
[i
]);
594 tasklet_kill(&sc
->intr_tq
);
595 tasklet_kill(&sc
->bcon_tasklet
);
603 static void ath9k_init_band_txpower(struct ath_softc
*sc
, int band
)
605 struct ieee80211_supported_band
*sband
;
606 struct ieee80211_channel
*chan
;
607 struct ath_hw
*ah
= sc
->sc_ah
;
608 struct ath_regulatory
*reg
= ath9k_hw_regulatory(ah
);
611 sband
= &sc
->sbands
[band
];
612 for (i
= 0; i
< sband
->n_channels
; i
++) {
613 chan
= &sband
->channels
[i
];
614 ah
->curchan
= &ah
->channels
[chan
->hw_value
];
615 ath9k_cmn_update_ichannel(ah
->curchan
, chan
, NL80211_CHAN_HT20
);
616 ath9k_hw_set_txpowerlimit(ah
, MAX_RATE_POWER
, true);
617 chan
->max_power
= reg
->max_power_level
/ 2;
621 static void ath9k_init_txpower_limits(struct ath_softc
*sc
)
623 struct ath_hw
*ah
= sc
->sc_ah
;
624 struct ath9k_channel
*curchan
= ah
->curchan
;
626 if (ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
)
627 ath9k_init_band_txpower(sc
, IEEE80211_BAND_2GHZ
);
628 if (ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
)
629 ath9k_init_band_txpower(sc
, IEEE80211_BAND_5GHZ
);
631 ah
->curchan
= curchan
;
/*
 * ath9k_set_hw_capab() - advertise driver/hardware capabilities to
 * mac80211: hw flags, supported interface modes, rate control algorithm,
 * band tables, HT capabilities and the permanent MAC address.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening/closing braces,
 * wiphy flag assignments, queue/max_rates settings, and the #endif that
 * must close the #ifdef CONFIG_ATH9K_RATE_CONTROL below); restore from
 * upstream before compiling.
 */
634 void ath9k_set_hw_capab(struct ath_softc
*sc
, struct ieee80211_hw
*hw
)
636 struct ath_common
*common
= ath9k_hw_common(sc
->sc_ah
);
638 hw
->flags
= IEEE80211_HW_RX_INCLUDES_FCS
|
639 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING
|
640 IEEE80211_HW_SIGNAL_DBM
|
641 IEEE80211_HW_SUPPORTS_PS
|
642 IEEE80211_HW_PS_NULLFUNC_STACK
|
643 IEEE80211_HW_SPECTRUM_MGMT
|
644 IEEE80211_HW_REPORTS_TX_ACK_STATUS
;
646 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_HT
)
647 hw
->flags
|= IEEE80211_HW_AMPDU_AGGREGATION
;
649 if (AR_SREV_9160_10_OR_LATER(sc
->sc_ah
) || modparam_nohwcrypt
)
650 hw
->flags
|= IEEE80211_HW_MFP_CAPABLE
;
652 hw
->wiphy
->interface_modes
=
653 BIT(NL80211_IFTYPE_P2P_GO
) |
654 BIT(NL80211_IFTYPE_P2P_CLIENT
) |
655 BIT(NL80211_IFTYPE_AP
) |
656 BIT(NL80211_IFTYPE_WDS
) |
657 BIT(NL80211_IFTYPE_STATION
) |
658 BIT(NL80211_IFTYPE_ADHOC
) |
659 BIT(NL80211_IFTYPE_MESH_POINT
);
661 if (AR_SREV_5416(sc
->sc_ah
))
662 hw
->wiphy
->flags
&= ~WIPHY_FLAG_PS_ON_BY_DEFAULT
;
666 hw
->channel_change_time
= 5000;
667 hw
->max_listen_interval
= 10;
668 hw
->max_rate_tries
= 10;
669 hw
->sta_data_size
= sizeof(struct ath_node
);
670 hw
->vif_data_size
= sizeof(struct ath_vif
);
672 #ifdef CONFIG_ATH9K_RATE_CONTROL
673 hw
->rate_control_algorithm
= "ath9k_rate_control";
676 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
)
677 hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
678 &sc
->sbands
[IEEE80211_BAND_2GHZ
];
679 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
)
680 hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
681 &sc
->sbands
[IEEE80211_BAND_5GHZ
];
683 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_HT
) {
684 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_2GHZ
)
685 setup_ht_cap(sc
, &sc
->sbands
[IEEE80211_BAND_2GHZ
].ht_cap
);
686 if (sc
->sc_ah
->caps
.hw_caps
& ATH9K_HW_CAP_5GHZ
)
687 setup_ht_cap(sc
, &sc
->sbands
[IEEE80211_BAND_5GHZ
].ht_cap
);
690 SET_IEEE80211_PERM_ADDR(hw
, common
->macaddr
);
/*
 * ath9k_init_device() - full device bring-up entry point called by the
 * bus glue: softc init, capability advertisement, regulatory init, TX/RX
 * setup, mac80211 registration, debugfs, regulatory hint, deferred work
 * items, rfkill polling and a PM QoS request. The trailing unregister/
 * deinit statements are the error-unwind path.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, 'ah'/'error'
 * declarations, the 'goto' error checks after each call, the continuation
 * argument of ath_regd_init, the error labels and return statements);
 * restore from upstream before compiling.
 */
693 int ath9k_init_device(u16 devid
, struct ath_softc
*sc
, u16 subsysid
,
694 const struct ath_bus_ops
*bus_ops
)
696 struct ieee80211_hw
*hw
= sc
->hw
;
697 struct ath_wiphy
*aphy
= hw
->priv
;
698 struct ath_common
*common
;
701 struct ath_regulatory
*reg
;
703 /* Bring up device */
704 error
= ath9k_init_softc(devid
, sc
, subsysid
, bus_ops
);
709 common
= ath9k_hw_common(ah
);
710 ath9k_set_hw_capab(sc
, hw
);
712 /* Initialize regulatory */
713 error
= ath_regd_init(&common
->regulatory
, sc
->hw
->wiphy
,
718 reg
= &common
->regulatory
;
721 error
= ath_tx_init(sc
, ATH_TXBUF
);
726 error
= ath_rx_init(sc
, ATH_RXBUF
);
730 ath9k_init_txpower_limits(sc
);
732 /* Register with mac80211 */
733 error
= ieee80211_register_hw(hw
);
737 error
= ath9k_init_debug(ah
);
739 ath_print(common
, ATH_DBG_FATAL
,
740 "Unable to create debugfs files\n");
744 /* Handle world regulatory */
745 if (!ath_is_world_regd(reg
)) {
746 error
= regulatory_hint(hw
->wiphy
, reg
->alpha2
);
751 INIT_WORK(&sc
->hw_check_work
, ath_hw_check
);
752 INIT_WORK(&sc
->paprd_work
, ath_paprd_calibrate
);
753 INIT_WORK(&sc
->chan_work
, ath9k_wiphy_chan_work
);
754 INIT_DELAYED_WORK(&sc
->wiphy_work
, ath9k_wiphy_work
);
755 sc
->wiphy_scheduler_int
= msecs_to_jiffies(500);
756 aphy
->last_rssi
= ATH_RSSI_DUMMY_MARKER
;
759 ath_start_rfkill_poll(sc
);
761 pm_qos_add_request(&sc
->pm_qos_req
, PM_QOS_CPU_DMA_LATENCY
,
762 PM_QOS_DEFAULT_VALUE
);
767 ieee80211_unregister_hw(hw
);
775 ath9k_deinit_softc(sc
);
780 /*****************************/
781 /* De-Initialization */
782 /*****************************/
/*
 * ath9k_deinit_softc() - release softc resources: the duplicated channel
 * tables, the btcoex no-stomp timer (3-wire scheme only), every TX queue,
 * the hardware state, and the tasklets.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, 'int i'
 * declaration, and the lines after the tasklet_kill calls up to the
 * closing brace); restore from upstream before compiling.
 */
784 static void ath9k_deinit_softc(struct ath_softc
*sc
)
788 if (sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
)
789 kfree(sc
->sbands
[IEEE80211_BAND_2GHZ
].channels
);
791 if (sc
->sbands
[IEEE80211_BAND_5GHZ
].channels
)
792 kfree(sc
->sbands
[IEEE80211_BAND_5GHZ
].channels
);
794 if ((sc
->btcoex
.no_stomp_timer
) &&
795 sc
->sc_ah
->btcoex_hw
.scheme
== ATH_BTCOEX_CFG_3WIRE
)
796 ath_gen_timer_free(sc
->sc_ah
, sc
->btcoex
.no_stomp_timer
);
798 for (i
= 0; i
< ATH9K_NUM_TX_QUEUES
; i
++)
799 if (ATH_TXQ_SETUP(sc
, i
))
800 ath_tx_cleanupq(sc
, &sc
->tx
.txq
[i
]);
802 ath9k_hw_deinit(sc
->sc_ah
);
804 tasklet_kill(&sc
->intr_tq
);
805 tasklet_kill(&sc
->bcon_tasklet
);
/*
 * ath9k_deinit_device() - top-level teardown called by the bus glue:
 * stop rfkill polling, unregister and free every secondary wiphy, then
 * the primary hw, drop the PM QoS request, deinit the softc and free the
 * secondary-wiphy array.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (opening brace, 'int i'
 * declaration, power-state handling before rfkill stop, the NULL check
 * inside the loop, and the closing braces); restore from upstream before
 * compiling.
 */
811 void ath9k_deinit_device(struct ath_softc
*sc
)
813 struct ieee80211_hw
*hw
= sc
->hw
;
818 wiphy_rfkill_stop_polling(sc
->hw
->wiphy
);
821 for (i
= 0; i
< sc
->num_sec_wiphy
; i
++) {
822 struct ath_wiphy
*aphy
= sc
->sec_wiphy
[i
];
825 sc
->sec_wiphy
[i
] = NULL
;
826 ieee80211_unregister_hw(aphy
->hw
);
827 ieee80211_free_hw(aphy
->hw
);
830 ieee80211_unregister_hw(hw
);
831 pm_qos_remove_request(&sc
->pm_qos_req
);
834 ath9k_deinit_softc(sc
);
835 kfree(sc
->sec_wiphy
);
838 void ath_descdma_cleanup(struct ath_softc
*sc
,
839 struct ath_descdma
*dd
,
840 struct list_head
*head
)
842 dma_free_coherent(sc
->dev
, dd
->dd_desc_len
, dd
->dd_desc
,
845 INIT_LIST_HEAD(head
);
846 kfree(dd
->dd_bufptr
);
847 memset(dd
, 0, sizeof(*dd
));
850 /************************/
852 /************************/
/*
 * ath9k_init() - module entry point: register the rate control algorithm,
 * then the PCI and AHB bus front-ends, unwinding on failure.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (braces, 'error'
 * declaration, the printk lines these message fragments belong to, the
 * remaining error checks/labels and return statements); restore from
 * upstream before compiling.
 */
854 static int __init
ath9k_init(void)
858 /* Register rate control algorithm */
859 error
= ath_rate_control_register();
862 "ath9k: Unable to register rate control "
868 error
= ath_pci_init();
871 "ath9k: No PCI devices found, driver not installed.\n");
873 goto err_rate_unregister
;
876 error
= ath_ahb_init();
888 ath_rate_control_unregister();
892 module_init(ath9k_init
);
/*
 * ath9k_exit() - module exit: unregister the rate control algorithm and
 * log the unload.
 * NOTE(review): extraction artifact — stray original line numbers are fused
 * into the text and interior lines are missing (braces and the bus
 * front-end teardown calls preceding the unregister); restore from
 * upstream before compiling.
 */
894 static void __exit
ath9k_exit(void)
898 ath_rate_control_unregister();
899 printk(KERN_INFO
"%s: Driver unloaded\n", dev_info
);
901 module_exit(ath9k_exit
);