2 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004-2005 Atheros Communications, Inc.
4 * Copyright (c) 2006 Devicescape Software, Inc.
5 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
8 * Modified for gPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>
9 * Original from Linux kernel 2.6.30.
11 * All rights reserved.
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer,
18 * without modification.
19 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
20 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
21 * redistribution must be conditioned upon including a substantially
22 * similar Disclaimer requirement for further binary redistribution.
23 * 3. Neither the names of the above-listed copyright holders nor the names
24 * of any contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
27 * Alternatively, this software may be distributed under the terms of the
28 * GNU General Public License ("GPL") version 2 as published by the Free
29 * Software Foundation.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
35 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
36 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
37 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
39 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
42 * THE POSSIBILITY OF SUCH DAMAGES.
46 FILE_LICENCE ( BSD3
);
49 #include <gpxe/malloc.h>
50 #include <gpxe/timer.h>
51 #include <gpxe/netdevice.h>
53 #include <gpxe/pci_io.h>
58 #define ATH5K_CALIB_INTERVAL 10 /* Calibrate PHY every 10 seconds */
59 #define ATH5K_RETRIES 4 /* Number of times to retry packet sends */
60 #define ATH5K_DESC_ALIGN 16 /* Alignment for TX/RX descriptors */
67 static struct pci_device_id ath5k_nics
[] = {
68 PCI_ROM(0x168c, 0x0207, "ath5210e", "Atheros 5210 early", AR5K_AR5210
),
69 PCI_ROM(0x168c, 0x0007, "ath5210", "Atheros 5210", AR5K_AR5210
),
70 PCI_ROM(0x168c, 0x0011, "ath5311", "Atheros 5311 (AHB)", AR5K_AR5211
),
71 PCI_ROM(0x168c, 0x0012, "ath5211", "Atheros 5211", AR5K_AR5211
),
72 PCI_ROM(0x168c, 0x0013, "ath5212", "Atheros 5212", AR5K_AR5212
),
73 PCI_ROM(0xa727, 0x0013, "ath5212c","3com Ath 5212", AR5K_AR5212
),
74 PCI_ROM(0x10b7, 0x0013, "rdag675", "3com 3CRDAG675", AR5K_AR5212
),
75 PCI_ROM(0x168c, 0x1014, "ath5212m", "Ath 5212 miniPCI", AR5K_AR5212
),
76 PCI_ROM(0x168c, 0x0014, "ath5212x14", "Atheros 5212 x14", AR5K_AR5212
),
77 PCI_ROM(0x168c, 0x0015, "ath5212x15", "Atheros 5212 x15", AR5K_AR5212
),
78 PCI_ROM(0x168c, 0x0016, "ath5212x16", "Atheros 5212 x16", AR5K_AR5212
),
79 PCI_ROM(0x168c, 0x0017, "ath5212x17", "Atheros 5212 x17", AR5K_AR5212
),
80 PCI_ROM(0x168c, 0x0018, "ath5212x18", "Atheros 5212 x18", AR5K_AR5212
),
81 PCI_ROM(0x168c, 0x0019, "ath5212x19", "Atheros 5212 x19", AR5K_AR5212
),
82 PCI_ROM(0x168c, 0x001a, "ath2413", "Atheros 2413 Griffin", AR5K_AR5212
),
83 PCI_ROM(0x168c, 0x001b, "ath5413", "Atheros 5413 Eagle", AR5K_AR5212
),
84 PCI_ROM(0x168c, 0x001c, "ath5212e", "Atheros 5212 PCI-E", AR5K_AR5212
),
85 PCI_ROM(0x168c, 0x001d, "ath2417", "Atheros 2417 Nala", AR5K_AR5212
),
89 static const struct ath5k_srev_name srev_names
[] = {
90 { "5210", AR5K_VERSION_MAC
, AR5K_SREV_AR5210
},
91 { "5311", AR5K_VERSION_MAC
, AR5K_SREV_AR5311
},
92 { "5311A", AR5K_VERSION_MAC
, AR5K_SREV_AR5311A
},
93 { "5311B", AR5K_VERSION_MAC
, AR5K_SREV_AR5311B
},
94 { "5211", AR5K_VERSION_MAC
, AR5K_SREV_AR5211
},
95 { "5212", AR5K_VERSION_MAC
, AR5K_SREV_AR5212
},
96 { "5213", AR5K_VERSION_MAC
, AR5K_SREV_AR5213
},
97 { "5213A", AR5K_VERSION_MAC
, AR5K_SREV_AR5213A
},
98 { "2413", AR5K_VERSION_MAC
, AR5K_SREV_AR2413
},
99 { "2414", AR5K_VERSION_MAC
, AR5K_SREV_AR2414
},
100 { "5424", AR5K_VERSION_MAC
, AR5K_SREV_AR5424
},
101 { "5413", AR5K_VERSION_MAC
, AR5K_SREV_AR5413
},
102 { "5414", AR5K_VERSION_MAC
, AR5K_SREV_AR5414
},
103 { "2415", AR5K_VERSION_MAC
, AR5K_SREV_AR2415
},
104 { "5416", AR5K_VERSION_MAC
, AR5K_SREV_AR5416
},
105 { "5418", AR5K_VERSION_MAC
, AR5K_SREV_AR5418
},
106 { "2425", AR5K_VERSION_MAC
, AR5K_SREV_AR2425
},
107 { "2417", AR5K_VERSION_MAC
, AR5K_SREV_AR2417
},
108 { "xxxxx", AR5K_VERSION_MAC
, AR5K_SREV_UNKNOWN
},
109 { "5110", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5110
},
110 { "5111", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5111
},
111 { "5111A", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5111A
},
112 { "2111", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2111
},
113 { "5112", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5112
},
114 { "5112A", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5112A
},
115 { "5112B", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5112B
},
116 { "2112", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2112
},
117 { "2112A", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2112A
},
118 { "2112B", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2112B
},
119 { "2413", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2413
},
120 { "5413", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5413
},
121 { "2316", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2316
},
122 { "2317", AR5K_VERSION_RAD
, AR5K_SREV_RAD_2317
},
123 { "5424", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5424
},
124 { "5133", AR5K_VERSION_RAD
, AR5K_SREV_RAD_5133
},
125 { "xxxxx", AR5K_VERSION_RAD
, AR5K_SREV_UNKNOWN
},
128 #define ATH5K_SPMBL_NO 1
129 #define ATH5K_SPMBL_YES 2
130 #define ATH5K_SPMBL_BOTH 3
132 static const struct {
137 { 10, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_1M
},
138 { 20, ATH5K_SPMBL_NO
, ATH5K_RATE_CODE_2M
},
139 { 55, ATH5K_SPMBL_NO
, ATH5K_RATE_CODE_5_5M
},
140 { 110, ATH5K_SPMBL_NO
, ATH5K_RATE_CODE_11M
},
141 { 60, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_6M
},
142 { 90, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_9M
},
143 { 120, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_12M
},
144 { 180, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_18M
},
145 { 240, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_24M
},
146 { 360, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_36M
},
147 { 480, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_48M
},
148 { 540, ATH5K_SPMBL_BOTH
, ATH5K_RATE_CODE_54M
},
149 { 20, ATH5K_SPMBL_YES
, ATH5K_RATE_CODE_2M
| AR5K_SET_SHORT_PREAMBLE
},
150 { 55, ATH5K_SPMBL_YES
, ATH5K_RATE_CODE_5_5M
| AR5K_SET_SHORT_PREAMBLE
},
151 { 110, ATH5K_SPMBL_YES
, ATH5K_RATE_CODE_11M
| AR5K_SET_SHORT_PREAMBLE
},
155 #define ATH5K_NR_RATES 15
158 * Prototypes - PCI stack related functions
160 static int ath5k_probe(struct pci_device
*pdev
,
161 const struct pci_device_id
*id
);
162 static void ath5k_remove(struct pci_device
*pdev
);
164 struct pci_driver ath5k_pci_driver __pci_driver
= {
166 .id_count
= sizeof(ath5k_nics
) / sizeof(ath5k_nics
[0]),
167 .probe
= ath5k_probe
,
168 .remove
= ath5k_remove
,
174 * Prototypes - MAC 802.11 stack related functions
176 static int ath5k_tx(struct net80211_device
*dev
, struct io_buffer
*skb
);
177 static int ath5k_reset(struct ath5k_softc
*sc
, struct net80211_channel
*chan
);
178 static int ath5k_reset_wake(struct ath5k_softc
*sc
);
179 static int ath5k_start(struct net80211_device
*dev
);
180 static void ath5k_stop(struct net80211_device
*dev
);
181 static int ath5k_config(struct net80211_device
*dev
, int changed
);
182 static void ath5k_poll(struct net80211_device
*dev
);
183 static void ath5k_irq(struct net80211_device
*dev
, int enable
);
185 static struct net80211_device_operations ath5k_ops
= {
188 .transmit
= ath5k_tx
,
191 .config
= ath5k_config
,
195 * Prototypes - Internal functions
198 static int ath5k_attach(struct net80211_device
*dev
);
199 static void ath5k_detach(struct net80211_device
*dev
);
200 /* Channel/mode setup */
201 static unsigned int ath5k_copy_channels(struct ath5k_hw
*ah
,
202 struct net80211_channel
*channels
,
205 static int ath5k_setup_bands(struct net80211_device
*dev
);
206 static int ath5k_chan_set(struct ath5k_softc
*sc
,
207 struct net80211_channel
*chan
);
208 static void ath5k_setcurmode(struct ath5k_softc
*sc
,
210 static void ath5k_mode_setup(struct ath5k_softc
*sc
);
212 /* Descriptor setup */
213 static int ath5k_desc_alloc(struct ath5k_softc
*sc
);
214 static void ath5k_desc_free(struct ath5k_softc
*sc
);
216 static int ath5k_rxbuf_setup(struct ath5k_softc
*sc
, struct ath5k_buf
*bf
);
217 static int ath5k_txbuf_setup(struct ath5k_softc
*sc
, struct ath5k_buf
*bf
);
219 static inline void ath5k_txbuf_free(struct ath5k_softc
*sc
,
220 struct ath5k_buf
*bf
)
225 net80211_tx_complete(sc
->dev
, bf
->iob
, 0, ECANCELED
);
229 static inline void ath5k_rxbuf_free(struct ath5k_softc
*sc __unused
,
230 struct ath5k_buf
*bf
)
237 static int ath5k_txq_setup(struct ath5k_softc
*sc
,
238 int qtype
, int subtype
);
239 static void ath5k_txq_drainq(struct ath5k_softc
*sc
,
240 struct ath5k_txq
*txq
);
241 static void ath5k_txq_cleanup(struct ath5k_softc
*sc
);
242 static void ath5k_txq_release(struct ath5k_softc
*sc
);
244 static int ath5k_rx_start(struct ath5k_softc
*sc
);
245 static void ath5k_rx_stop(struct ath5k_softc
*sc
);
247 static void ath5k_tx_processq(struct ath5k_softc
*sc
,
248 struct ath5k_txq
*txq
);
250 /* Interrupt handling */
251 static int ath5k_init(struct ath5k_softc
*sc
);
252 static int ath5k_stop_hw(struct ath5k_softc
*sc
);
254 static void ath5k_calibrate(struct ath5k_softc
*sc
);
257 static void ath5k_configure_filter(struct ath5k_softc
*sc
);
259 /********************\
260 * PCI Initialization *
261 \********************/
265 ath5k_chip_name(enum ath5k_srev_type type
, u16 val
)
267 const char *name
= "xxxxx";
270 for (i
= 0; i
< ARRAY_SIZE(srev_names
); i
++) {
271 if (srev_names
[i
].sr_type
!= type
)
274 if ((val
& 0xf0) == srev_names
[i
].sr_val
)
275 name
= srev_names
[i
].sr_name
;
277 if ((val
& 0xff) == srev_names
[i
].sr_val
) {
278 name
= srev_names
[i
].sr_name
;
287 static int ath5k_probe(struct pci_device
*pdev
,
288 const struct pci_device_id
*id
)
291 struct ath5k_softc
*sc
;
292 struct net80211_device
*dev
;
296 adjust_pci_device(pdev
);
299 * Cache line size is used to size and align various
300 * structures used to communicate with the hardware.
302 pci_read_config_byte(pdev
, PCI_CACHE_LINE_SIZE
, &csz
);
305 * We must have this setup properly for rx buffer
306 * DMA to work so force a reasonable value here if it
309 pci_write_config_byte(pdev
, PCI_CACHE_LINE_SIZE
, 16);
312 * The default setting of latency timer yields poor results,
313 * set it to the value used by other systems. It may be worth
314 * tweaking this setting more.
316 pci_write_config_byte(pdev
, PCI_LATENCY_TIMER
, 0xa8);
319 * Disable the RETRY_TIMEOUT register (0x41) to keep
320 * PCI Tx retries from interfering with C3 CPU state.
322 pci_write_config_byte(pdev
, 0x41, 0);
324 mem
= ioremap(pdev
->membase
, 0x10000);
326 DBG("ath5k: cannot remap PCI memory region\n");
332 * Allocate dev (net80211 main struct)
333 * and dev->priv (driver private data)
335 dev
= net80211_alloc(sizeof(*sc
));
337 DBG("ath5k: cannot allocate 802.11 device\n");
342 /* Initialize driver private data */
347 sc
->hwinfo
= zalloc(sizeof(*sc
->hwinfo
));
349 DBG("ath5k: cannot allocate 802.11 hardware info structure\n");
354 sc
->hwinfo
->flags
= NET80211_HW_RX_HAS_FCS
;
355 sc
->hwinfo
->signal_type
= NET80211_SIGNAL_DB
;
356 sc
->hwinfo
->signal_max
= 40; /* 35dB should give perfect 54Mbps */
357 sc
->hwinfo
->channel_change_time
= 5000;
359 /* Avoid working with the device until setup is complete */
360 sc
->status
|= ATH_STAT_INVALID
;
363 sc
->cachelsz
= csz
* 4; /* convert to bytes */
365 DBG("ath5k: register base at %p (%08lx)\n", sc
->iobase
, pdev
->membase
);
366 DBG("ath5k: cache line size %d\n", sc
->cachelsz
);
368 /* Set private data */
369 pci_set_drvdata(pdev
, dev
);
370 dev
->netdev
->dev
= (struct device
*)pdev
;
372 /* Initialize device */
373 ret
= ath5k_hw_attach(sc
, id
->driver_data
, &sc
->ah
);
375 goto err_free_hwinfo
;
377 /* Finish private driver data initialization */
378 ret
= ath5k_attach(dev
);
383 DBG("Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
384 ath5k_chip_name(AR5K_VERSION_MAC
, sc
->ah
->ah_mac_srev
),
385 sc
->ah
->ah_mac_srev
, sc
->ah
->ah_phy_revision
);
387 if (!sc
->ah
->ah_single_chip
) {
388 /* Single chip radio (!RF5111) */
389 if (sc
->ah
->ah_radio_5ghz_revision
&&
390 !sc
->ah
->ah_radio_2ghz_revision
) {
391 /* No 5GHz support -> report 2GHz radio */
392 if (!(sc
->ah
->ah_capabilities
.cap_mode
& AR5K_MODE_BIT_11A
)) {
393 DBG("RF%s 2GHz radio found (0x%x)\n",
394 ath5k_chip_name(AR5K_VERSION_RAD
,
395 sc
->ah
->ah_radio_5ghz_revision
),
396 sc
->ah
->ah_radio_5ghz_revision
);
397 /* No 2GHz support (5110 and some
398 * 5Ghz only cards) -> report 5Ghz radio */
399 } else if (!(sc
->ah
->ah_capabilities
.cap_mode
& AR5K_MODE_BIT_11B
)) {
400 DBG("RF%s 5GHz radio found (0x%x)\n",
401 ath5k_chip_name(AR5K_VERSION_RAD
,
402 sc
->ah
->ah_radio_5ghz_revision
),
403 sc
->ah
->ah_radio_5ghz_revision
);
404 /* Multiband radio */
406 DBG("RF%s multiband radio found (0x%x)\n",
407 ath5k_chip_name(AR5K_VERSION_RAD
,
408 sc
->ah
->ah_radio_5ghz_revision
),
409 sc
->ah
->ah_radio_5ghz_revision
);
412 /* Multi chip radio (RF5111 - RF2111) ->
413 * report both 2GHz/5GHz radios */
414 else if (sc
->ah
->ah_radio_5ghz_revision
&&
415 sc
->ah
->ah_radio_2ghz_revision
) {
416 DBG("RF%s 5GHz radio found (0x%x)\n",
417 ath5k_chip_name(AR5K_VERSION_RAD
,
418 sc
->ah
->ah_radio_5ghz_revision
),
419 sc
->ah
->ah_radio_5ghz_revision
);
420 DBG("RF%s 2GHz radio found (0x%x)\n",
421 ath5k_chip_name(AR5K_VERSION_RAD
,
422 sc
->ah
->ah_radio_2ghz_revision
),
423 sc
->ah
->ah_radio_2ghz_revision
);
429 sc
->status
&= ~ATH_STAT_INVALID
;
433 ath5k_hw_detach(sc
->ah
);
444 static void ath5k_remove(struct pci_device
*pdev
)
446 struct net80211_device
*dev
= pci_get_drvdata(pdev
);
447 struct ath5k_softc
*sc
= dev
->priv
;
450 ath5k_hw_detach(sc
->ah
);
457 /***********************\
458 * Driver Initialization *
459 \***********************/
462 ath5k_attach(struct net80211_device
*dev
)
464 struct ath5k_softc
*sc
= dev
->priv
;
465 struct ath5k_hw
*ah
= sc
->ah
;
469 * Collect the channel list. The 802.11 layer
470 * is resposible for filtering this list based
471 * on settings like the phy mode and regulatory
472 * domain restrictions.
474 ret
= ath5k_setup_bands(dev
);
476 DBG("ath5k: can't get channels\n");
480 /* NB: setup here so ath5k_rate_update is happy */
481 if (ah
->ah_modes
& AR5K_MODE_BIT_11A
)
482 ath5k_setcurmode(sc
, AR5K_MODE_11A
);
484 ath5k_setcurmode(sc
, AR5K_MODE_11B
);
487 * Allocate tx+rx descriptors and populate the lists.
489 ret
= ath5k_desc_alloc(sc
);
491 DBG("ath5k: can't allocate descriptors\n");
496 * Allocate hardware transmit queues. Note that hw functions
497 * handle reseting these queues at the needed time.
499 ret
= ath5k_txq_setup(sc
, AR5K_TX_QUEUE_DATA
, AR5K_WME_AC_BE
);
501 DBG("ath5k: can't setup xmit queue\n");
505 sc
->last_calib_ticks
= currticks();
507 ret
= ath5k_eeprom_read_mac(ah
, sc
->hwinfo
->hwaddr
);
509 DBG("ath5k: unable to read address from EEPROM: 0x%04x\n",
514 memset(sc
->bssidmask
, 0xff, ETH_ALEN
);
515 ath5k_hw_set_bssid_mask(sc
->ah
, sc
->bssidmask
);
517 ret
= net80211_register(sc
->dev
, &ath5k_ops
, sc
->hwinfo
);
519 DBG("ath5k: can't register ieee80211 hw\n");
525 ath5k_txq_release(sc
);
533 ath5k_detach(struct net80211_device
*dev
)
535 struct ath5k_softc
*sc
= dev
->priv
;
537 net80211_unregister(dev
);
539 ath5k_txq_release(sc
);
545 /********************\
546 * Channel/mode setup *
547 \********************/
550 * Convert IEEE channel number to MHz frequency.
553 ath5k_ieee2mhz(short chan
)
556 return 2407 + 5 * chan
;
560 return 2212 + 20 * chan
;
561 return 5000 + 5 * chan
;
565 ath5k_copy_channels(struct ath5k_hw
*ah
,
566 struct net80211_channel
*channels
,
567 unsigned int mode
, unsigned int max
)
569 unsigned int i
, count
, size
, chfreq
, freq
, ch
;
571 if (!(ah
->ah_modes
& (1 << mode
)))
576 case AR5K_MODE_11A_TURBO
:
577 /* 1..220, but 2GHz frequencies are filtered by check_channel */
579 chfreq
= CHANNEL_5GHZ
;
583 case AR5K_MODE_11G_TURBO
:
585 chfreq
= CHANNEL_2GHZ
;
591 for (i
= 0, count
= 0; i
< size
&& max
> 0; i
++) {
593 freq
= ath5k_ieee2mhz(ch
);
595 /* Check if channel is supported by the chipset */
596 if (!ath5k_channel_ok(ah
, freq
, chfreq
))
599 /* Write channel info and increment counter */
600 channels
[count
].center_freq
= freq
;
601 channels
[count
].maxpower
= 0; /* use regulatory */
602 channels
[count
].band
= (chfreq
== CHANNEL_2GHZ
) ?
603 NET80211_BAND_2GHZ
: NET80211_BAND_5GHZ
;
607 channels
[count
].hw_value
= chfreq
| CHANNEL_OFDM
;
609 case AR5K_MODE_11A_TURBO
:
610 case AR5K_MODE_11G_TURBO
:
611 channels
[count
].hw_value
= chfreq
|
612 CHANNEL_OFDM
| CHANNEL_TURBO
;
615 channels
[count
].hw_value
= CHANNEL_B
;
626 ath5k_setup_bands(struct net80211_device
*dev
)
628 struct ath5k_softc
*sc
= dev
->priv
;
629 struct ath5k_hw
*ah
= sc
->ah
;
630 int max_c
, count_c
= 0;
634 max_c
= sizeof(sc
->hwinfo
->channels
) / sizeof(sc
->hwinfo
->channels
[0]);
637 if (sc
->ah
->ah_capabilities
.cap_mode
& AR5K_MODE_BIT_11G
) {
639 band
= NET80211_BAND_2GHZ
;
640 sc
->hwinfo
->bands
= NET80211_BAND_BIT_2GHZ
;
641 sc
->hwinfo
->modes
= (NET80211_MODE_G
| NET80211_MODE_B
);
643 for (i
= 0; i
< 12; i
++)
644 sc
->hwinfo
->rates
[band
][i
] = ath5k_rates
[i
].bitrate
;
645 sc
->hwinfo
->nr_rates
[band
] = 12;
647 sc
->hwinfo
->nr_channels
=
648 ath5k_copy_channels(ah
, sc
->hwinfo
->channels
,
649 AR5K_MODE_11G
, max_c
);
650 count_c
= sc
->hwinfo
->nr_channels
;
652 } else if (sc
->ah
->ah_capabilities
.cap_mode
& AR5K_MODE_BIT_11B
) {
654 band
= NET80211_BAND_2GHZ
;
655 sc
->hwinfo
->bands
= NET80211_BAND_BIT_2GHZ
;
656 sc
->hwinfo
->modes
= NET80211_MODE_B
;
658 for (i
= 0; i
< 4; i
++)
659 sc
->hwinfo
->rates
[band
][i
] = ath5k_rates
[i
].bitrate
;
660 sc
->hwinfo
->nr_rates
[band
] = 4;
662 sc
->hwinfo
->nr_channels
=
663 ath5k_copy_channels(ah
, sc
->hwinfo
->channels
,
664 AR5K_MODE_11B
, max_c
);
665 count_c
= sc
->hwinfo
->nr_channels
;
669 /* 5GHz band, A mode */
670 if (sc
->ah
->ah_capabilities
.cap_mode
& AR5K_MODE_BIT_11A
) {
671 band
= NET80211_BAND_5GHZ
;
672 sc
->hwinfo
->bands
|= NET80211_BAND_BIT_5GHZ
;
673 sc
->hwinfo
->modes
|= NET80211_MODE_A
;
675 for (i
= 0; i
< 8; i
++)
676 sc
->hwinfo
->rates
[band
][i
] = ath5k_rates
[i
+4].bitrate
;
677 sc
->hwinfo
->nr_rates
[band
] = 8;
679 sc
->hwinfo
->nr_channels
=
680 ath5k_copy_channels(ah
, sc
->hwinfo
->channels
,
681 AR5K_MODE_11B
, max_c
);
682 count_c
= sc
->hwinfo
->nr_channels
;
690 * Set/change channels. If the channel is really being changed,
691 * it's done by reseting the chip. To accomplish this we must
692 * first cleanup any pending DMA, then restart stuff after a la
696 ath5k_chan_set(struct ath5k_softc
*sc
, struct net80211_channel
*chan
)
698 if (chan
->center_freq
!= sc
->curchan
->center_freq
||
699 chan
->hw_value
!= sc
->curchan
->hw_value
) {
701 * To switch channels clear any pending DMA operations;
702 * wait long enough for the RX fifo to drain, reset the
703 * hardware at the new frequency, and then re-enable
704 * the relevant bits of the h/w.
706 DBG2("ath5k: resetting for channel change (%d -> %d MHz)\n",
707 sc
->curchan
->center_freq
, chan
->center_freq
);
708 return ath5k_reset(sc
, chan
);
715 ath5k_setcurmode(struct ath5k_softc
*sc
, unsigned int mode
)
719 if (mode
== AR5K_MODE_11A
) {
720 sc
->curband
= NET80211_BAND_5GHZ
;
722 sc
->curband
= NET80211_BAND_2GHZ
;
727 ath5k_mode_setup(struct ath5k_softc
*sc
)
729 struct ath5k_hw
*ah
= sc
->ah
;
732 /* configure rx filter */
733 rfilt
= sc
->filter_flags
;
734 ath5k_hw_set_rx_filter(ah
, rfilt
);
736 if (ath5k_hw_hasbssidmask(ah
))
737 ath5k_hw_set_bssid_mask(ah
, sc
->bssidmask
);
739 /* configure operational mode */
740 ath5k_hw_set_opmode(ah
);
742 ath5k_hw_set_mcast_filter(ah
, 0, 0);
746 ath5k_hw_rix_to_bitrate(int hw_rix
)
750 for (i
= 0; i
< ATH5K_NR_RATES
; i
++) {
751 if (ath5k_rates
[i
].hw_code
== hw_rix
)
752 return ath5k_rates
[i
].bitrate
;
755 DBG("ath5k: invalid rix %02x\n", hw_rix
);
756 return 10; /* use lowest rate */
759 int ath5k_bitrate_to_hw_rix(int bitrate
)
763 for (i
= 0; i
< ATH5K_NR_RATES
; i
++) {
764 if (ath5k_rates
[i
].bitrate
== bitrate
)
765 return ath5k_rates
[i
].hw_code
;
768 DBG("ath5k: invalid bitrate %d\n", bitrate
);
769 return ATH5K_RATE_CODE_1M
; /* use lowest rate */
776 static struct io_buffer
*
777 ath5k_rx_iob_alloc(struct ath5k_softc
*sc
, u32
*iob_addr
)
779 struct io_buffer
*iob
;
783 * Allocate buffer with headroom_needed space for the
784 * fake physical layer header at the start.
786 iob
= alloc_iob(sc
->rxbufsize
+ sc
->cachelsz
- 1);
789 DBG("ath5k: can't alloc iobuf of size %d\n",
790 sc
->rxbufsize
+ sc
->cachelsz
- 1);
794 *iob_addr
= virt_to_bus(iob
->data
);
797 * Cache-line-align. This is important (for the
798 * 5210 at least) as not doing so causes bogus data
801 off
= *iob_addr
% sc
->cachelsz
;
803 iob_reserve(iob
, sc
->cachelsz
- off
);
804 *iob_addr
+= sc
->cachelsz
- off
;
811 ath5k_rxbuf_setup(struct ath5k_softc
*sc
, struct ath5k_buf
*bf
)
813 struct ath5k_hw
*ah
= sc
->ah
;
814 struct io_buffer
*iob
= bf
->iob
;
815 struct ath5k_desc
*ds
;
818 iob
= ath5k_rx_iob_alloc(sc
, &bf
->iobaddr
);
825 * Setup descriptors. For receive we always terminate
826 * the descriptor list with a self-linked entry so we'll
827 * not get overrun under high load (as can happen with a
828 * 5212 when ANI processing enables PHY error frames).
830 * To insure the last descriptor is self-linked we create
831 * each descriptor as self-linked and add it to the end. As
832 * each additional descriptor is added the previous self-linked
833 * entry is ``fixed'' naturally. This should be safe even
834 * if DMA is happening. When processing RX interrupts we
835 * never remove/process the last, self-linked, entry on the
836 * descriptor list. This insures the hardware always has
837 * someplace to write a new frame.
840 ds
->ds_link
= bf
->daddr
; /* link to self */
841 ds
->ds_data
= bf
->iobaddr
;
842 if (ah
->ah_setup_rx_desc(ah
, ds
,
843 iob_tailroom(iob
), /* buffer size */
845 DBG("ath5k: error setting up RX descriptor for %d bytes\n", iob_tailroom(iob
));
849 if (sc
->rxlink
!= NULL
)
850 *sc
->rxlink
= bf
->daddr
;
851 sc
->rxlink
= &ds
->ds_link
;
856 ath5k_txbuf_setup(struct ath5k_softc
*sc
, struct ath5k_buf
*bf
)
858 struct ath5k_hw
*ah
= sc
->ah
;
859 struct ath5k_txq
*txq
= &sc
->txq
;
860 struct ath5k_desc
*ds
= bf
->desc
;
861 struct io_buffer
*iob
= bf
->iob
;
862 unsigned int pktlen
, flags
;
867 flags
= AR5K_TXDESC_INTREQ
| AR5K_TXDESC_CLRDMASK
;
868 bf
->iobaddr
= virt_to_bus(iob
->data
);
869 pktlen
= iob_len(iob
);
871 /* FIXME: If we are in g mode and rate is a CCK rate
872 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
873 * from tx power (value is in dB units already) */
874 if (sc
->dev
->phy_flags
& NET80211_PHY_USE_PROTECTION
) {
875 struct net80211_device
*dev
= sc
->dev
;
877 flags
|= AR5K_TXDESC_CTSENA
;
878 cts_rate
= sc
->hw_rtscts_rate
;
879 duration
= net80211_cts_duration(dev
, pktlen
);
881 ret
= ah
->ah_setup_tx_desc(ah
, ds
, pktlen
,
882 IEEE80211_TYP_FRAME_HEADER_LEN
,
883 AR5K_PKT_TYPE_NORMAL
, sc
->power_level
* 2,
884 sc
->hw_rate
, ATH5K_RETRIES
,
885 AR5K_TXKEYIX_INVALID
, 0, flags
,
891 ds
->ds_data
= bf
->iobaddr
;
893 list_add_tail(&bf
->list
, &txq
->q
);
894 if (txq
->link
== NULL
) /* is this first packet? */
895 ath5k_hw_set_txdp(ah
, txq
->qnum
, bf
->daddr
);
896 else /* no, so only link it */
897 *txq
->link
= bf
->daddr
;
899 txq
->link
= &ds
->ds_link
;
900 ath5k_hw_start_tx_dma(ah
, txq
->qnum
);
906 /*******************\
907 * Descriptors setup *
908 \*******************/
911 ath5k_desc_alloc(struct ath5k_softc
*sc
)
913 struct ath5k_desc
*ds
;
914 struct ath5k_buf
*bf
;
919 /* allocate descriptors */
920 sc
->desc_len
= sizeof(struct ath5k_desc
) * (ATH_TXBUF
+ ATH_RXBUF
+ 1);
921 sc
->desc
= malloc_dma(sc
->desc_len
, ATH5K_DESC_ALIGN
);
922 if (sc
->desc
== NULL
) {
923 DBG("ath5k: can't allocate descriptors\n");
927 memset(sc
->desc
, 0, sc
->desc_len
);
928 sc
->desc_daddr
= virt_to_bus(sc
->desc
);
933 bf
= calloc(ATH_TXBUF
+ ATH_RXBUF
+ 1, sizeof(struct ath5k_buf
));
935 DBG("ath5k: can't allocate buffer pointers\n");
941 INIT_LIST_HEAD(&sc
->rxbuf
);
942 for (i
= 0; i
< ATH_RXBUF
; i
++, bf
++, ds
++, da
+= sizeof(*ds
)) {
945 list_add_tail(&bf
->list
, &sc
->rxbuf
);
948 INIT_LIST_HEAD(&sc
->txbuf
);
949 sc
->txbuf_len
= ATH_TXBUF
;
950 for (i
= 0; i
< ATH_TXBUF
; i
++, bf
++, ds
++, da
+= sizeof(*ds
)) {
953 list_add_tail(&bf
->list
, &sc
->txbuf
);
959 free_dma(sc
->desc
, sc
->desc_len
);
966 ath5k_desc_free(struct ath5k_softc
*sc
)
968 struct ath5k_buf
*bf
;
970 list_for_each_entry(bf
, &sc
->txbuf
, list
)
971 ath5k_txbuf_free(sc
, bf
);
972 list_for_each_entry(bf
, &sc
->rxbuf
, list
)
973 ath5k_rxbuf_free(sc
, bf
);
975 /* Free memory associated with all descriptors */
976 free_dma(sc
->desc
, sc
->desc_len
);
991 ath5k_txq_setup(struct ath5k_softc
*sc
, int qtype
, int subtype
)
993 struct ath5k_hw
*ah
= sc
->ah
;
994 struct ath5k_txq
*txq
;
995 struct ath5k_txq_info qi
= {
996 .tqi_subtype
= subtype
,
997 .tqi_aifs
= AR5K_TXQ_USEDEFAULT
,
998 .tqi_cw_min
= AR5K_TXQ_USEDEFAULT
,
999 .tqi_cw_max
= AR5K_TXQ_USEDEFAULT
1004 * Enable interrupts only for EOL and DESC conditions.
1005 * We mark tx descriptors to receive a DESC interrupt
1006 * when a tx queue gets deep; otherwise waiting for the
1007 * EOL to reap descriptors. Note that this is done to
1008 * reduce interrupt load and this only defers reaping
1009 * descriptors, never transmitting frames. Aside from
1010 * reducing interrupts this also permits more concurrency.
1011 * The only potential downside is if the tx queue backs
1012 * up in which case the top half of the kernel may backup
1013 * due to a lack of tx descriptors.
1015 qi
.tqi_flags
= AR5K_TXQ_FLAG_TXEOLINT_ENABLE
|
1016 AR5K_TXQ_FLAG_TXDESCINT_ENABLE
;
1017 qnum
= ath5k_hw_setup_tx_queue(ah
, qtype
, &qi
);
1019 DBG("ath5k: can't set up a TX queue\n");
1027 INIT_LIST_HEAD(&txq
->q
);
1034 ath5k_txq_drainq(struct ath5k_softc
*sc
, struct ath5k_txq
*txq
)
1036 struct ath5k_buf
*bf
, *bf0
;
1038 list_for_each_entry_safe(bf
, bf0
, &txq
->q
, list
) {
1039 ath5k_txbuf_free(sc
, bf
);
1041 list_del(&bf
->list
);
1042 list_add_tail(&bf
->list
, &sc
->txbuf
);
1049 * Drain the transmit queues and reclaim resources.
1052 ath5k_txq_cleanup(struct ath5k_softc
*sc
)
1054 struct ath5k_hw
*ah
= sc
->ah
;
1056 if (!(sc
->status
& ATH_STAT_INVALID
)) {
1057 /* don't touch the hardware if marked invalid */
1058 if (sc
->txq
.setup
) {
1059 ath5k_hw_stop_tx_dma(ah
, sc
->txq
.qnum
);
1060 DBG("ath5k: txq [%d] %x, link %p\n",
1062 ath5k_hw_get_txdp(ah
, sc
->txq
.qnum
),
1068 ath5k_txq_drainq(sc
, &sc
->txq
);
1072 ath5k_txq_release(struct ath5k_softc
*sc
)
1074 if (sc
->txq
.setup
) {
1075 ath5k_hw_release_tx_queue(sc
->ah
);
1088 * Enable the receive h/w following a reset.
1091 ath5k_rx_start(struct ath5k_softc
*sc
)
1093 struct ath5k_hw
*ah
= sc
->ah
;
1094 struct ath5k_buf
*bf
;
1097 sc
->rxbufsize
= IEEE80211_MAX_LEN
;
1098 if (sc
->rxbufsize
% sc
->cachelsz
!= 0)
1099 sc
->rxbufsize
+= sc
->cachelsz
- (sc
->rxbufsize
% sc
->cachelsz
);
1103 list_for_each_entry(bf
, &sc
->rxbuf
, list
) {
1104 ret
= ath5k_rxbuf_setup(sc
, bf
);
1109 bf
= list_entry(sc
->rxbuf
.next
, struct ath5k_buf
, list
);
1111 ath5k_hw_set_rxdp(ah
, bf
->daddr
);
1112 ath5k_hw_start_rx_dma(ah
); /* enable recv descriptors */
1113 ath5k_mode_setup(sc
); /* set filters, etc. */
1114 ath5k_hw_start_rx_pcu(ah
); /* re-enable PCU/DMA engine */
1120 * Disable the receive h/w in preparation for a reset.
1123 ath5k_rx_stop(struct ath5k_softc
*sc
)
1125 struct ath5k_hw
*ah
= sc
->ah
;
1127 ath5k_hw_stop_rx_pcu(ah
); /* disable PCU */
1128 ath5k_hw_set_rx_filter(ah
, 0); /* clear recv filter */
1129 ath5k_hw_stop_rx_dma(ah
); /* disable DMA engine */
1131 sc
->rxlink
= NULL
; /* just in case */
1135 ath5k_handle_rx(struct ath5k_softc
*sc
)
1137 struct ath5k_rx_status rs
;
1138 struct io_buffer
*iob
, *next_iob
;
1140 struct ath5k_buf
*bf
, *bf_last
;
1141 struct ath5k_desc
*ds
;
1144 memset(&rs
, 0, sizeof(rs
));
1146 if (list_empty(&sc
->rxbuf
)) {
1147 DBG("ath5k: empty rx buf pool\n");
1151 bf_last
= list_entry(sc
->rxbuf
.prev
, struct ath5k_buf
, list
);
1154 bf
= list_entry(sc
->rxbuf
.next
, struct ath5k_buf
, list
);
1155 assert(bf
->iob
!= NULL
);
1160 * last buffer must not be freed to ensure proper hardware
1161 * function. When the hardware finishes also a packet next to
1162 * it, we are sure, it doesn't use it anymore and we can go on.
1167 struct ath5k_buf
*bf_next
= list_entry(bf
->list
.next
,
1168 struct ath5k_buf
, list
);
1169 ret
= sc
->ah
->ah_proc_rx_desc(sc
->ah
, bf_next
->desc
,
1174 /* skip the overwritten one (even status is martian) */
1178 ret
= sc
->ah
->ah_proc_rx_desc(sc
->ah
, ds
, &rs
);
1180 if (ret
!= -EINPROGRESS
) {
1181 DBG("ath5k: error in processing rx desc: %s\n",
1183 net80211_rx_err(sc
->dev
, NULL
, -ret
);
1185 /* normal return, reached end of
1186 available descriptors */
1192 DBG("ath5k: unsupported fragmented rx\n");
1197 if (rs
.rs_status
& AR5K_RXERR_PHY
) {
1198 DBG("ath5k: rx PHY error\n");
1201 if (rs
.rs_status
& AR5K_RXERR_CRC
) {
1202 net80211_rx_err(sc
->dev
, NULL
, EIO
);
1205 if (rs
.rs_status
& AR5K_RXERR_DECRYPT
) {
1207 * Decrypt error. If the error occurred
1208 * because there was no hardware key, then
1209 * let the frame through so the upper layers
1210 * can process it. This is necessary for 5210
1211 * parts which have no way to setup a ``clear''
1214 * XXX do key cache faulting
1216 if (rs
.rs_keyix
== AR5K_RXKEYIX_INVALID
&&
1217 !(rs
.rs_status
& AR5K_RXERR_CRC
))
1221 /* any other error, unhandled */
1222 DBG("ath5k: packet rx status %x\n", rs
.rs_status
);
1226 next_iob
= ath5k_rx_iob_alloc(sc
, &next_iob_addr
);
1229 * If we can't replace bf->iob with a new iob under memory
1230 * pressure, just skip this packet
1233 DBG("ath5k: dropping packet under memory pressure\n");
1237 iob_put(iob
, rs
.rs_datalen
);
1239 /* The MAC header is padded to have 32-bit boundary if the
1240 * packet payload is non-zero. However, gPXE only
1241 * supports standard 802.11 packets with 24-byte
1242 * header, so no padding correction should be needed.
1245 DBG2("ath5k: rx %d bytes, signal %d\n", rs
.rs_datalen
,
1248 net80211_rx(sc
->dev
, iob
, rs
.rs_rssi
,
1249 ath5k_hw_rix_to_bitrate(rs
.rs_rate
));
1252 bf
->iobaddr
= next_iob_addr
;
1254 list_del(&bf
->list
);
1255 list_add_tail(&bf
->list
, &sc
->rxbuf
);
1256 } while (ath5k_rxbuf_setup(sc
, bf
) == 0);
/*
 * ath5k_tx_processq() - reap completed transmit descriptors from one queue.
 *
 * Walks txq->q with the deletion-safe iterator (entries are moved back to
 * the free list while iterating), asks the HAL completion hook
 * (ah_proc_tx_desc) for each descriptor's status, hands finished frames to
 * net80211_tx_complete() with the retry count ts_retry[0] and EIO when
 * ts_status is non-zero, and returns each buffer to sc->txbuf for reuse.
 *
 * NOTE(review): this extract is missing several source lines — the
 * declarations/assignments of `ret`, `ds` and `iob`, the early-exit paths
 * after the -EINPROGRESS check, and the body of the trailing
 * list_empty() test. Verify against the full source before relying on
 * the exact control flow.
 */
1267 ath5k_tx_processq(struct ath5k_softc
*sc
, struct ath5k_txq
*txq
)
1269 struct ath5k_tx_status ts
;
1270 struct ath5k_buf
*bf
, *bf0
;
1271 struct ath5k_desc
*ds
;
1272 struct io_buffer
*iob
;
/* Start each iteration with a clean status block. */
1275 memset(&ts
, 0, sizeof(ts
));
/* Deletion-safe walk: bf may be unlinked inside the loop body. */
1277 list_for_each_entry_safe(bf
, bf0
, &txq
->q
, list
) {
/* Ask the HAL whether this descriptor has completed. */
1280 ret
= sc
->ah
->ah_proc_tx_desc(sc
->ah
, ds
, &ts
);
/* -EINPROGRESS means the hardware is still working on it. */
1282 if (ret
!= -EINPROGRESS
) {
1283 DBG("ath5k: error in processing tx desc: %s\n",
1286 /* normal return, reached end of tx completions */
1294 DBG2("ath5k: tx %d bytes complete, %d retries\n",
1295 iob_len(iob
), ts
.ts_retry
[0]);
/* Report completion upward; a non-zero ts_status becomes EIO. */
1297 net80211_tx_complete(sc
->dev
, iob
, ts
.ts_retry
[0],
1298 ts
.ts_status
? EIO
: 0);
/* Recycle the buffer onto the driver's free list. */
1300 list_del(&bf
->list
);
1301 list_add_tail(&bf
->list
, &sc
->txbuf
);
/* Queue fully drained? (body of this branch not visible here) */
1305 if (list_empty(&txq
->q
))
1310 ath5k_handle_tx(struct ath5k_softc
*sc
)
1312 ath5k_tx_processq(sc
, &sc
->txq
);
1316 /********************\
1317 * Interrupt handling *
1318 \********************/
1321 ath5k_irq(struct net80211_device
*dev
, int enable
)
1323 struct ath5k_softc
*sc
= dev
->priv
;
1324 struct ath5k_hw
*ah
= sc
->ah
;
1326 sc
->irq_ena
= enable
;
1327 ah
->ah_ier
= enable
? AR5K_IER_ENABLE
: AR5K_IER_DISABLE
;
1329 ath5k_hw_reg_write(ah
, ah
->ah_ier
, AR5K_IER
);
1330 ath5k_hw_set_imr(ah
, sc
->imask
);
/*
 * ath5k_init() - bring the hardware from an unknown state to running.
 *
 * Selects the current channel/band from the net80211 device, programs
 * the interrupt mask, performs a full chip reset, clears the key cache
 * and configures ACK bit-rate behaviour.
 *
 * NOTE(review): this extract is missing several source lines — the
 * declarations of `ret`/`i`, the initial stop/teardown call referred to
 * by the first comment, and the error handling after ath5k_reset().
 * Verify against the full source.
 */
1334 ath5k_init(struct ath5k_softc
*sc
)
1336 struct ath5k_hw
*ah
= sc
->ah
;
1340 * Stop anything previously setup. This is safe
1341 * no matter this is the first time through or not.
1346 * The basic interface to setting the hardware in a good
1347 * state is ``reset''. On return the hardware is known to
1348 * be powered up and with interrupts disabled. This must
1349 * be followed by initialization of the appropriate bits
1350 * and then setup of the interrupt mask.
/* Mirror net80211's notion of the active channel and band. */
1352 sc
->curchan
= sc
->dev
->channels
+ sc
->dev
->channel
;
1353 sc
->curband
= sc
->curchan
->band
;
/* Interrupt sources of interest: rx/tx completion and error
 * conditions, plus fatal errors and the global enable bit. */
1354 sc
->imask
= AR5K_INT_RXOK
| AR5K_INT_RXERR
| AR5K_INT_RXEOL
|
1355 AR5K_INT_RXORN
| AR5K_INT_TXDESC
| AR5K_INT_TXEOL
|
1356 AR5K_INT_FATAL
| AR5K_INT_GLOBAL
;
/* Full reset without a channel change (error path not visible here). */
1357 ret
= ath5k_reset(sc
, NULL
);
1362 * Reset the key cache since some parts do not reset the
1363 * contents on initial power up or resume from suspend.
1365 for (i
= 0; i
< AR5K_KEYTABLE_SIZE
; i
++)
1366 ath5k_hw_reset_key(ah
, i
);
1368 /* Set ack to be sent at low bit-rates */
1369 ath5k_hw_set_ack_bitrate_high(ah
, 0);
/*
 * ath5k_stop_hw() - shut down the hardware and driver state.
 *
 * Interrupts are masked, transmit queues are cleaned up, and the PHY is
 * disabled — each hardware access guarded by ATH_STAT_INVALID so nothing
 * is touched if the device has disappeared.
 *
 * NOTE(review): this extract is missing some source lines (e.g. the
 * receive-path teardown and the function's closing statements); verify
 * against the full source.
 */
1378 ath5k_stop_hw(struct ath5k_softc
*sc
)
1380 struct ath5k_hw
*ah
= sc
->ah
;
1383 * Shutdown the hardware and driver:
1384 * stop output from above
1385 * disable interrupts
1387 * turn off the radio
1388 * clear transmit machinery
1389 * clear receive machinery
1390 * drain and release tx queues
1391 * reclaim beacon resources
1392 * power down hardware
1394 * Note that some of this work is not possible if the
1395 * hardware is gone (invalid).
/* Only mask interrupts if the hardware is still present. */
1398 if (!(sc
->status
& ATH_STAT_INVALID
)) {
1399 ath5k_hw_set_imr(ah
, 0);
/* Release all pending transmit buffers/descriptors. */
1401 ath5k_txq_cleanup(sc
);
/* Disable the PHY, again only when the hardware is present. */
1402 if (!(sc
->status
& ATH_STAT_INVALID
)) {
1404 ath5k_hw_phy_disable(ah
);
/*
 * ath5k_poll() - net80211 poll hook: periodic calibration plus
 * interrupt servicing in a bounded loop.
 *
 * Runs PHY calibration once every ATH5K_CALIB_INTERVAL seconds, then
 * (unless the hardware is gone, or interrupts are enabled but none are
 * pending) reads and clears the interrupt status and dispatches:
 * fatal errors and rx overruns trigger a chip reset, rx EOL is logged,
 * tx underrun bumps the trigger level, and rx/tx completion bits are
 * handed to ath5k_handle_rx()/ath5k_handle_tx(). The loop is bounded
 * by `counter` (1000 iterations) to avoid interrupt storms.
 *
 * NOTE(review): this extract is missing the do { ... } skeleton lines,
 * the early-return body, and some closing braces; verify the exact
 * control flow against the full source.
 */
1412 ath5k_poll(struct net80211_device
*dev
)
1414 struct ath5k_softc
*sc
= dev
->priv
;
1415 struct ath5k_hw
*ah
= sc
->ah
;
1416 enum ath5k_int status
;
/* Bound on interrupt-servicing iterations per poll. */
1417 unsigned int counter
= 1000;
/* Time-based periodic calibration. */
1419 if (currticks() - sc
->last_calib_ticks
>
1420 ATH5K_CALIB_INTERVAL
* ticks_per_sec()) {
1421 ath5k_calibrate(sc
);
1422 sc
->last_calib_ticks
= currticks();
/* Nothing to do if the hardware is gone, or IRQs are enabled
 * and none are currently asserted. */
1425 if ((sc
->status
& ATH_STAT_INVALID
) ||
1426 (sc
->irq_ena
&& !ath5k_hw_is_intr_pending(ah
)))
1430 ath5k_hw_get_isr(ah
, &status
); /* NB: clears IRQ too */
1431 DBGP("ath5k: status %#x/%#x\n", status
, sc
->imask
);
1432 if (status
& AR5K_INT_FATAL
) {
1434 * Fatal errors are unrecoverable.
1435 * Typically these are caused by DMA errors.
1437 DBG("ath5k: fatal error, resetting\n");
1438 ath5k_reset_wake(sc
);
1439 } else if (status
& AR5K_INT_RXORN
) {
1440 DBG("ath5k: rx overrun, resetting\n");
1441 ath5k_reset_wake(sc
);
1443 if (status
& AR5K_INT_RXEOL
) {
1445 * NB: the hardware should re-read the link when
1446 * RXE bit is written, but it doesn't work at
1447 * least on older hardware revs.
1449 DBG("ath5k: rx EOL\n");
1452 if (status
& AR5K_INT_TXURN
) {
1453 /* bump tx trigger level */
1454 DBG("ath5k: tx underrun\n");
1455 ath5k_hw_update_tx_triglevel(ah
, 1);
/* Dispatch receive and transmit completion work. */
1457 if (status
& (AR5K_INT_RXOK
| AR5K_INT_RXERR
))
1458 ath5k_handle_rx(sc
);
1459 if (status
& (AR5K_INT_TXOK
| AR5K_INT_TXDESC
1460 | AR5K_INT_TXERR
| AR5K_INT_TXEOL
))
1461 ath5k_handle_tx(sc
);
/* Keep draining until no interrupt is pending or the budget runs out. */
1463 } while (ath5k_hw_is_intr_pending(ah
) && counter
-- > 0);
1466 DBG("ath5k: too many interrupts, giving up for now\n");
1470 * Periodically recalibrate the PHY to account
1471 * for temperature/environment changes.
1474 ath5k_calibrate(struct ath5k_softc
*sc
)
1476 struct ath5k_hw
*ah
= sc
->ah
;
1478 if (ath5k_hw_gainf_calibrate(ah
) == AR5K_RFGAIN_NEED_CHANGE
) {
1480 * Rfgain is out of bounds, reset the chip
1481 * to load new gain values.
1483 DBG("ath5k: resetting for calibration\n");
1484 ath5k_reset_wake(sc
);
1486 if (ath5k_hw_phy_calibrate(ah
, sc
->curchan
))
1487 DBG("ath5k: calibration of channel %d failed\n",
1488 sc
->curchan
->channel_nr
);
1492 /********************\
1493 * Net80211 functions *
1494 \********************/
/*
 * ath5k_tx() - net80211 transmit hook.
 *
 * Takes the first free buffer off sc->txbuf, and attempts to set it up
 * for DMA via ath5k_txbuf_setup(); on failure the buffer is returned to
 * the free list. If no transmit buffers are available the packet is
 * dropped with a debug message.
 *
 * NOTE(review): this extract is missing some source lines — the
 * declaration of `rc`, the error-return statements, and the line that
 * attaches `iob` to the buffer before setup. Verify against the full
 * source.
 */
1497 ath5k_tx(struct net80211_device
*dev
, struct io_buffer
*iob
)
1499 struct ath5k_softc
*sc
= dev
->priv
;
1500 struct ath5k_buf
*bf
;
1504 * The hardware expects the header padded to 4 byte boundaries.
1505 * gPXE only ever sends 24-byte headers, so no action necessary.
/* No free transmit buffers: drop the frame. */
1508 if (list_empty(&sc
->txbuf
)) {
1509 DBG("ath5k: dropping packet because no tx bufs available\n");
/* Detach the first free buffer from the free list. */
1513 bf
= list_entry(sc
->txbuf
.next
, struct ath5k_buf
, list
);
1514 list_del(&bf
->list
);
/* On setup failure, put the buffer back for reuse. */
1519 if ((rc
= ath5k_txbuf_setup(sc
, bf
)) != 0) {
1521 list_add_tail(&bf
->list
, &sc
->txbuf
);
1529 * Reset the hardware. If chan is not NULL, then also pause rx/tx
1530 * and change to the given channel.
/*
 * ath5k_reset() - reset the hardware; if chan is non-NULL also switch
 * to the given channel.
 *
 * Masks interrupts and drains the transmit queues, performs a full
 * hardware reset on the current channel, restarts the receive path and
 * finally re-enables interrupts through ath5k_irq() if they were
 * enabled before.
 *
 * NOTE(review): this extract is missing some source lines — the
 * declaration of `ret`, the rx teardown and the `if (chan)` guard
 * around the band/channel update, plus the goto-style error exits
 * after the failure messages. Verify against the full source.
 */
1533 ath5k_reset(struct ath5k_softc
*sc
, struct net80211_channel
*chan
)
1535 struct ath5k_hw
*ah
= sc
->ah
;
/* Quiesce the device: mask interrupts and drain tx queues. */
1539 ath5k_hw_set_imr(ah
, 0);
1540 ath5k_txq_cleanup(sc
);
/* Record the new band when switching channels. */
1544 sc
->curband
= chan
->band
;
/* Full chip reset on the current channel. */
1547 ret
= ath5k_hw_reset(ah
, sc
->curchan
, 1);
1549 DBG("ath5k: can't reset hardware: %s\n", strerror(ret
));
/* Restart the receive path. */
1553 ret
= ath5k_rx_start(sc
);
1555 DBG("ath5k: can't start rx logic: %s\n", strerror(ret
));
1560 * Change channels and update the h/w rate map if we're switching;
1561 * e.g. 11a to 11b/g.
1563 * We may be doing a reset in response to an ioctl that changes the
1564 * channel so update any state that might change as a result.
1568 /* ath5k_chan_change(sc, c); */
1570 /* Reenable interrupts if necessary */
1571 ath5k_irq(sc
->dev
, sc
->irq_ena
);
1576 static int ath5k_reset_wake(struct ath5k_softc
*sc
)
1578 return ath5k_reset(sc
, sc
->curchan
);
1581 static int ath5k_start(struct net80211_device
*dev
)
1583 struct ath5k_softc
*sc
= dev
->priv
;
1586 if ((ret
= ath5k_init(sc
)) != 0)
1590 ath5k_configure_filter(sc
);
1591 ath5k_hw_set_lladdr(sc
->ah
, dev
->netdev
->ll_addr
);
1596 static void ath5k_stop(struct net80211_device
*dev
)
1598 struct ath5k_softc
*sc
= dev
->priv
;
1599 u8 mac
[ETH_ALEN
] = {};
1601 ath5k_hw_set_lladdr(sc
->ah
, mac
);
/*
 * ath5k_config() - net80211 configuration-change hook.
 *
 * @dev:     net80211 device (private data is our ath5k_softc)
 * @changed: bitmask of NET80211_CFG_* flags describing what changed
 *
 * Handles channel changes (updates power level and retunes), rate and
 * PHY-parameter changes (maps net80211 bitrates to hardware rate codes,
 * honoring the short-preamble flag for both the data rate and the
 * RTS/CTS rate), and association changes (caches the association state
 * and programs the BSSID into the hardware).
 *
 * NOTE(review): this extract is missing some source lines — the
 * declarations of `ret`/`i`, the error return after ath5k_chan_set(),
 * the if/else around the memcpy/memset of ah_bssid, and the closing
 * braces/return. Verify against the full source.
 */
1607 ath5k_config(struct net80211_device
*dev
, int changed
)
1609 struct ath5k_softc
*sc
= dev
->priv
;
1610 struct ath5k_hw
*ah
= sc
->ah
;
1611 struct net80211_channel
*chan
= &dev
->channels
[dev
->channel
];
/* Channel change: track regulatory max power and retune. */
1614 if (changed
& NET80211_CFG_CHANNEL
) {
1615 sc
->power_level
= chan
->maxpower
;
1616 if ((ret
= ath5k_chan_set(sc
, chan
)) != 0)
/* Rate or PHY-parameter change: rebuild hardware rate codes. */
1620 if ((changed
& NET80211_CFG_RATE
) ||
1621 (changed
& NET80211_CFG_PHY_PARAMS
)) {
1622 int spmbl
= ATH5K_SPMBL_NO
;
1623 u16 rate
= dev
->rates
[dev
->rate
];
1624 u16 slowrate
= dev
->rates
[dev
->rtscts_rate
];
/* Prefer short-preamble rate codes when the PHY allows it. */
1627 if (dev
->phy_flags
& NET80211_PHY_USE_SHORT_PREAMBLE
)
1628 spmbl
= ATH5K_SPMBL_YES
;
/* Scan the rate table for matching data and RTS/CTS rates. */
1630 for (i
= 0; i
< ATH5K_NR_RATES
; i
++) {
1631 if (ath5k_rates
[i
].bitrate
== rate
&&
1632 (ath5k_rates
[i
].short_pmbl
& spmbl
))
1633 sc
->hw_rate
= ath5k_rates
[i
].hw_code
;
1635 if (ath5k_rates
[i
].bitrate
== slowrate
&&
1636 (ath5k_rates
[i
].short_pmbl
& spmbl
))
1637 sc
->hw_rtscts_rate
= ath5k_rates
[i
].hw_code
;
/* Association change: cache state and program the BSSID. */
1641 if (changed
& NET80211_CFG_ASSOC
) {
1642 sc
->assoc
= !!(dev
->state
& NET80211_ASSOCIATED
);
1644 memcpy(ah
->ah_bssid
, dev
->bssid
, ETH_ALEN
);
/* (presumably the else branch: broadcast BSSID when not
 * associated — TODO confirm against full source) */
1646 memset(ah
->ah_bssid
, 0xff, ETH_ALEN
);
1648 ath5k_hw_set_associd(ah
, ah
->ah_bssid
, 0);
1655 * o always accept unicast, broadcast, and multicast traffic
1656 * o multicast traffic for all BSSIDs will be enabled if mac80211
1658 * o maintain current state of phy ofdm or phy cck error reception.
1659 * If the hardware detects any of these type of errors then
1660 * ath5k_hw_get_rx_filter() will pass to us the respective
1661 * hardware filters to be able to receive these type of frames.
1662 * o probe request frames are accepted only when operating in
1663 * hostap, adhoc, or monitor modes
1664 * o enable promiscuous mode according to the interface state
1666 * - when operating in adhoc mode so the 802.11 layer creates
1667 * node table entries for peers,
1668 * - when operating in station mode for collecting rssi data when
1669 * the station is otherwise quiet, or
1672 static void ath5k_configure_filter(struct ath5k_softc
*sc
)
1674 struct ath5k_hw
*ah
= sc
->ah
;
1675 u32 mfilt
[2], rfilt
;
1677 /* Enable all multicast */
1681 /* Enable data frames and beacons */
1682 rfilt
= (AR5K_RX_FILTER_UCAST
| AR5K_RX_FILTER_BCAST
|
1683 AR5K_RX_FILTER_MCAST
| AR5K_RX_FILTER_BEACON
);
1686 ath5k_hw_set_rx_filter(ah
, rfilt
);
1688 /* Set multicast bits */
1689 ath5k_hw_set_mcast_filter(ah
, mfilt
[0], mfilt
[1]);
1691 /* Set the cached hw filter flags, this will alter actually
1693 sc
->filter_flags
= rfilt
;