/*
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * Modified for gPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>
 * Original from Linux kernel 2.6.30.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

FILE_LICENCE ( BSD3 );

#include <stdlib.h>
#include <gpxe/malloc.h>
#include <gpxe/timer.h>
#include <gpxe/netdevice.h>
#include <gpxe/pci.h>
#include <gpxe/pci_io.h>

#include "base.h"
#include "reg.h"

#define ATH5K_CALIB_INTERVAL	10 /* Calibrate PHY every 10 seconds */
#define ATH5K_RETRIES		4  /* Number of times to retry packet sends */
#define ATH5K_DESC_ALIGN	16 /* Alignment for TX/RX descriptors */

/******************\
* Internal defines *
\******************/

/* Known PCI ids */
static struct pci_device_id ath5k_nics[] = {
	PCI_ROM(0x168c, 0x0207, "ath5210e", "Atheros 5210 early", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0007, "ath5210", "Atheros 5210", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0011, "ath5311", "Atheros 5311 (AHB)", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0012, "ath5211", "Atheros 5211", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0013, "ath5212", "Atheros 5212", AR5K_AR5212),
	PCI_ROM(0xa727, 0x0013, "ath5212c", "3com Ath 5212", AR5K_AR5212),
	PCI_ROM(0x10b7, 0x0013, "rdag675", "3com 3CRDAG675", AR5K_AR5212),
	PCI_ROM(0x168c, 0x1014, "ath5212m", "Ath 5212 miniPCI", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0014, "ath5212x14", "Atheros 5212 x14", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0015, "ath5212x15", "Atheros 5212 x15", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0016, "ath5212x16", "Atheros 5212 x16", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0017, "ath5212x17", "Atheros 5212 x17", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0018, "ath5212x18", "Atheros 5212 x18", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0019, "ath5212x19", "Atheros 5212 x19", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001a, "ath2413", "Atheros 2413 Griffin", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001b, "ath5413", "Atheros 5413 Eagle", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001c, "ath5212e", "Atheros 5212 PCI-E", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001d, "ath2417", "Atheros 2417 Nala", AR5K_AR5212),
};

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
	{ "5210",  AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
	{ "5311",  AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
	{ "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
	{ "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
	{ "5211",  AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
	{ "5212",  AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
	{ "5213",  AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
	{ "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
	{ "2413",  AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
	{ "2414",  AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
	{ "5424",  AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
	{ "5413",  AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
	{ "5414",  AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
	{ "2415",  AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
	{ "5416",  AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
	{ "5418",  AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
	{ "2425",  AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
	{ "2417",  AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
	{ "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
	{ "5110",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
	{ "5111",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
	{ "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
	{ "2111",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
	{ "5112",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
	{ "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
	{ "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
	{ "2112",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
	{ "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
	{ "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
	{ "2413",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
	{ "5413",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
	{ "2316",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
	{ "2317",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
	{ "5424",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
	{ "5133",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
	{ "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
};

#define ATH5K_SPMBL_NO   1
#define ATH5K_SPMBL_YES  2
#define ATH5K_SPMBL_BOTH 3

static const struct {
	u16 bitrate;
	u8 short_pmbl;
	u8 hw_code;
} ath5k_rates[] = {
	{ 10, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_1M },
	{ 20, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_2M },
	{ 55, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_5_5M },
	{ 110, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_11M },
	{ 60, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_6M },
	{ 90, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_9M },
	{ 120, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_12M },
	{ 180, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_18M },
	{ 240, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_24M },
	{ 360, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_36M },
	{ 480, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_48M },
	{ 540, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_54M },
	{ 20, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE },
	{ 55, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE },
	{ 110, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE },
	{ 0, 0, 0 },
};

#define ATH5K_NR_RATES 15
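
/*
 * Bitrates in the table above are in units of 100 kb/s (so 10 means
 * 1 Mb/s), matching what gPXE's net80211 layer expects. The last three
 * real entries repeat the 2/5.5/11M CCK rates with the short-preamble
 * flag folded into their hardware codes; ATH5K_NR_RATES counts the 15
 * real entries and excludes the { 0, 0, 0 } terminator.
 */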

/*
 * Prototypes - PCI stack related functions
 */
static int ath5k_probe(struct pci_device *pdev,
		       const struct pci_device_id *id);
static void ath5k_remove(struct pci_device *pdev);

struct pci_driver ath5k_pci_driver __pci_driver = {
	.ids = ath5k_nics,
	.id_count = sizeof(ath5k_nics) / sizeof(ath5k_nics[0]),
	.probe = ath5k_probe,
	.remove = ath5k_remove,
};



/*
 * Prototypes - MAC 802.11 stack related functions
 */
static int ath5k_tx(struct net80211_device *dev, struct io_buffer *iob);
static int ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan);
static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct net80211_device *dev);
static void ath5k_stop(struct net80211_device *dev);
static int ath5k_config(struct net80211_device *dev, int changed);
static void ath5k_poll(struct net80211_device *dev);
static void ath5k_irq(struct net80211_device *dev, int enable);

static struct net80211_device_operations ath5k_ops = {
	.open = ath5k_start,
	.close = ath5k_stop,
	.transmit = ath5k_tx,
	.poll = ath5k_poll,
	.irq = ath5k_irq,
	.config = ath5k_config,
};

/*
 * Prototypes - Internal functions
 */
/* Attach detach */
static int ath5k_attach(struct net80211_device *dev);
static void ath5k_detach(struct net80211_device *dev);
/* Channel/mode setup */
static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
					struct net80211_channel *channels,
					unsigned int mode,
					unsigned int max);
static int ath5k_setup_bands(struct net80211_device *dev);
static int ath5k_chan_set(struct ath5k_softc *sc,
			  struct net80211_channel *chan);
static void ath5k_setcurmode(struct ath5k_softc *sc,
			     unsigned int mode);
static void ath5k_mode_setup(struct ath5k_softc *sc);

/* Descriptor setup */
static int ath5k_desc_alloc(struct ath5k_softc *sc);
static void ath5k_desc_free(struct ath5k_softc *sc);
/* Buffers setup */
static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);

static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
				    struct ath5k_buf *bf)
{
	if (!bf->iob)
		return;

	net80211_tx_complete(sc->dev, bf->iob, 0, ECANCELED);
	bf->iob = NULL;
}

static inline void ath5k_rxbuf_free(struct ath5k_softc *sc __unused,
				    struct ath5k_buf *bf)
{
	free_iob(bf->iob);
	bf->iob = NULL;
}
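
/*
 * Note on ownership in the two helpers above: a pending TX io_buffer
 * still belongs to the net80211 stack, so it is handed back via
 * net80211_tx_complete() (here with ECANCELED); an RX io_buffer was
 * allocated by this driver itself and is simply freed.
 */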

/* Queues setup */
static int ath5k_txq_setup(struct ath5k_softc *sc,
			   int qtype, int subtype);
static void ath5k_txq_drainq(struct ath5k_softc *sc,
			     struct ath5k_txq *txq);
static void ath5k_txq_cleanup(struct ath5k_softc *sc);
static void ath5k_txq_release(struct ath5k_softc *sc);
/* Rx handling */
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);
/* Tx handling */
static void ath5k_tx_processq(struct ath5k_softc *sc,
			      struct ath5k_txq *txq);

/* Interrupt handling */
static int ath5k_init(struct ath5k_softc *sc);
static int ath5k_stop_hw(struct ath5k_softc *sc);

static void ath5k_calibrate(struct ath5k_softc *sc);

/* Filter */
static void ath5k_configure_filter(struct ath5k_softc *sc);

/********************\
* PCI Initialization *
\********************/

#if DBGLVL_MAX
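/*
 * Map a MAC/PHY/radio revision code to a human-readable chip name. A match
 * on the masked (0xf0) value records a provisional family name; an exact
 * (0xff) match overrides it and ends the search.
 */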
static const char *
ath5k_chip_name(enum ath5k_srev_type type, u16 val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
#endif

static int ath5k_probe(struct pci_device *pdev,
		       const struct pci_device_id *id)
{
	void *mem;
	struct ath5k_softc *sc;
	struct net80211_device *dev;
	int ret;
	u8 csz;

	adjust_pci_device(pdev);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * We must have this set up properly for rx buffer
		 * DMA to work, so force a reasonable value here if it
		 * comes up zero. (Without also updating csz itself,
		 * sc->cachelsz below would be zero and the modulo
		 * arithmetic in the rx path would divide by zero.)
		 */
		csz = 16;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of the latency timer yields poor results;
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	mem = ioremap(pdev->membase, 0x10000);
	if (!mem) {
		DBG("ath5k: cannot remap PCI memory region\n");
		ret = -EIO;
		goto err;
	}

	/*
	 * Allocate dev (net80211 main struct)
	 * and dev->priv (driver private data)
	 */
	dev = net80211_alloc(sizeof(*sc));
	if (!dev) {
		DBG("ath5k: cannot allocate 802.11 device\n");
		ret = -ENOMEM;
		goto err_map;
	}

	/* Initialize driver private data */
	sc = dev->priv;
	sc->dev = dev;
	sc->pdev = pdev;

	sc->hwinfo = zalloc(sizeof(*sc->hwinfo));
	if (!sc->hwinfo) {
		DBG("ath5k: cannot allocate 802.11 hardware info structure\n");
		ret = -ENOMEM;
		goto err_free;
	}

	sc->hwinfo->flags = NET80211_HW_RX_HAS_FCS;
	sc->hwinfo->signal_type = NET80211_SIGNAL_DB;
	sc->hwinfo->signal_max = 40; /* 35dB should give perfect 54Mbps */
	sc->hwinfo->channel_change_time = 5000;

	/* Avoid working with the device until setup is complete */
	sc->status |= ATH_STAT_INVALID;

	sc->iobase = mem;
	sc->cachelsz = csz * 4; /* convert to bytes */

	DBG("ath5k: register base at %p (%08lx)\n", sc->iobase, pdev->membase);
	DBG("ath5k: cache line size %d\n", sc->cachelsz);

	/* Set private data */
	pci_set_drvdata(pdev, dev);
	dev->netdev->dev = (struct device *)pdev;

	/* Initialize device */
	ret = ath5k_hw_attach(sc, id->driver_data, &sc->ah);
	if (ret)
		goto err_free_hwinfo;

	/* Finish private driver data initialization */
	ret = ath5k_attach(dev);
	if (ret)
		goto err_ah;

#if DBGLVL_MAX
	DBG("Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
	    ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
	    sc->ah->ah_mac_srev, sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
		    !sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A)) {
				DBG("RF%s 2GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz-only cards) -> report 5GHz radio */
			} else if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B)) {
				DBG("RF%s 5GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				DBG("RF%s multiband radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
			 sc->ah->ah_radio_2ghz_revision) {
			DBG("RF%s 5GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_5ghz_revision),
			    sc->ah->ah_radio_5ghz_revision);
			DBG("RF%s 2GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_2ghz_revision),
			    sc->ah->ah_radio_2ghz_revision);
		}
	}
#endif

	/* Ready to go */
	sc->status &= ~ATH_STAT_INVALID;

	return 0;
err_ah:
	ath5k_hw_detach(sc->ah);
err_free_hwinfo:
	free(sc->hwinfo);
err_free:
	net80211_free(dev);
err_map:
	iounmap(mem);
err:
	return ret;
}
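
/*
 * The error labels above unwind in strict reverse order of acquisition
 * (hardware attach, hwinfo allocation, net80211 allocation, I/O mapping),
 * so each failure path releases exactly what had already been set up.
 */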

static void ath5k_remove(struct pci_device *pdev)
{
	struct net80211_device *dev = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = dev->priv;

	ath5k_detach(dev);
	ath5k_hw_detach(sc->ah);
	iounmap(sc->iobase);
	free(sc->hwinfo);
	net80211_free(dev);
}


/***********************\
* Driver Initialization *
\***********************/

static int
ath5k_attach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int ret;

	/*
	 * Collect the channel list. The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(dev);
	if (ret) {
		DBG("ath5k: can't get channels\n");
		goto err;
	}

	/* NB: setup here so ath5k_rate_update is happy */
	if (ah->ah_modes & AR5K_MODE_BIT_11A)
		ath5k_setcurmode(sc, AR5K_MODE_11A);
	else
		ath5k_setcurmode(sc, AR5K_MODE_11B);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		DBG("ath5k: can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues. Note that hw functions
	 * handle resetting these queues at the needed time.
	 */
	ret = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
	if (ret) {
		DBG("ath5k: can't set up xmit queue\n");
		goto err_desc;
	}

	sc->last_calib_ticks = currticks();

	ret = ath5k_eeprom_read_mac(ah, sc->hwinfo->hwaddr);
	if (ret) {
		DBG("ath5k: unable to read address from EEPROM: 0x%04x\n",
		    sc->pdev->device);
		goto err_queues;
	}

	memset(sc->bssidmask, 0xff, ETH_ALEN);
	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	ret = net80211_register(sc->dev, &ath5k_ops, sc->hwinfo);
	if (ret) {
		DBG("ath5k: can't register ieee80211 hw\n");
		goto err_queues;
	}

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}
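
/*
 * The all-ones BSSID mask set in ath5k_attach() marks every bit of the
 * BSSID as significant, so hardware that supports BSSID masking will only
 * accept frames whose BSSID exactly matches the one configured later.
 */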

static void
ath5k_detach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;

	net80211_unregister(dev);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
}




/********************\
* Channel/mode setup *
\********************/

/*
 * Convert IEEE channel number to MHz frequency.
 */
static inline short
ath5k_ieee2mhz(short chan)
{
	if (chan < 14)
		return 2407 + 5 * chan;
	if (chan == 14)
		return 2484;
	if (chan < 27)
		return 2212 + 20 * chan;
	return 5000 + 5 * chan;
}
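
/*
 * Examples: channel 1 -> 2407 + 5*1 = 2412 MHz, channel 14 -> 2484 MHz,
 * channel 36 -> 5000 + 5*36 = 5180 MHz. Channels 15..26 map onto the
 * 20 MHz grid (2212 + 20*chan) used by the extended Atheros numbering.
 */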

static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
		    struct net80211_channel *channels,
		    unsigned int mode, unsigned int max)
{
	unsigned int i, count, size, chfreq, freq, ch;

	if (!(ah->ah_modes & (1 << mode)))
		return 0;

	switch (mode) {
	case AR5K_MODE_11A:
	case AR5K_MODE_11A_TURBO:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
	case AR5K_MODE_11G_TURBO:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		break;
	default:
		return 0;
	}

	for (i = 0, count = 0; i < size && max > 0; i++) {
		ch = i + 1;
		freq = ath5k_ieee2mhz(ch);

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].maxpower = 0; /* use regulatory */
		channels[count].band = (chfreq == CHANNEL_2GHZ) ?
			NET80211_BAND_2GHZ : NET80211_BAND_5GHZ;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11A_TURBO:
		case AR5K_MODE_11G_TURBO:
			channels[count].hw_value = chfreq |
				CHANNEL_OFDM | CHANNEL_TURBO;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
		max--;
	}

	return count;
}

static int
ath5k_setup_bands(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int max_c, count_c = 0;
	int i;
	int band;

	max_c = sizeof(sc->hwinfo->channels) / sizeof(sc->hwinfo->channels[0]);

	/* 2GHz band */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11G) {
		/* G mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = (NET80211_MODE_G | NET80211_MODE_B);

		for (i = 0; i < 12; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 12;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11G, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	} else if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B) {
		/* B mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = NET80211_MODE_B;

		for (i = 0; i < 4; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 4;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11B, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	}

	/* 5GHz band, A mode */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A) {
		band = NET80211_BAND_5GHZ;
		sc->hwinfo->bands |= NET80211_BAND_BIT_5GHZ;
		sc->hwinfo->modes |= NET80211_MODE_A;

		for (i = 0; i < 8; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i+4].bitrate;
		sc->hwinfo->nr_rates[band] = 8;

		/* Append 11a channels after any 2GHz channels copied above
		 * (the original passed AR5K_MODE_11B here and overwrote the
		 * 2GHz channel list, which was a bug) */
		count_c = ath5k_copy_channels(ah,
					      sc->hwinfo->channels
					      + sc->hwinfo->nr_channels,
					      AR5K_MODE_11A, max_c);
		sc->hwinfo->nr_channels += count_c;
		max_c -= count_c;
	}

	return 0;
}
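
/*
 * The rate table is laid out so that indices 0..11 are the twelve 802.11b+g
 * rates and indices 4..11 are the eight OFDM-only rates; that is why the
 * 11a block copies ath5k_rates[i+4] while the 11b block copies only the
 * first four entries.
 */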

/*
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any
 * pending DMA, then restart things along the lines of ath5k_init.
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	if (chan->center_freq != sc->curchan->center_freq ||
	    chan->hw_value != sc->curchan->hw_value) {
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		DBG2("ath5k: resetting for channel change (%d -> %d MHz)\n",
		     sc->curchan->center_freq, chan->center_freq);
		return ath5k_reset(sc, chan);
	}

	return 0;
}

static void
ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
{
	sc->curmode = mode;

	if (mode == AR5K_MODE_11A) {
		sc->curband = NET80211_BAND_5GHZ;
	} else {
		sc->curband = NET80211_BAND_2GHZ;
	}
}

static void
ath5k_mode_setup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	/* configure rx filter */
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);

	/* configure operational mode */
	ath5k_hw_set_opmode(ah);

	ath5k_hw_set_mcast_filter(ah, 0, 0);
}

static inline int
ath5k_hw_rix_to_bitrate(int hw_rix)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].hw_code == hw_rix)
			return ath5k_rates[i].bitrate;
	}

	DBG("ath5k: invalid rix %02x\n", hw_rix);
	return 10; /* use lowest rate */
}

int ath5k_bitrate_to_hw_rix(int bitrate)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].bitrate == bitrate)
			return ath5k_rates[i].hw_code;
	}

	DBG("ath5k: invalid bitrate %d\n", bitrate);
	return ATH5K_RATE_CODE_1M; /* use lowest rate */
}
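
/*
 * Both lookups above are linear scans of the 15-entry rate table; on a
 * failed lookup they deliberately fall back to the lowest (1M) rate
 * rather than propagating an error.
 */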

/***************\
* Buffers setup *
\***************/

static struct io_buffer *
ath5k_rx_iob_alloc(struct ath5k_softc *sc, u32 *iob_addr)
{
	struct io_buffer *iob;
	unsigned int off;

	/*
	 * Allocate the buffer with enough slack that its start can be
	 * rounded up to a cache line boundary below.
	 */
	iob = alloc_iob(sc->rxbufsize + sc->cachelsz - 1);

	if (!iob) {
		DBG("ath5k: can't alloc iobuf of size %d\n",
		    sc->rxbufsize + sc->cachelsz - 1);
		return NULL;
	}

	*iob_addr = virt_to_bus(iob->data);

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	off = *iob_addr % sc->cachelsz;
	if (off != 0) {
		iob_reserve(iob, sc->cachelsz - off);
		*iob_addr += sc->cachelsz - off;
	}

	return iob;
}
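
/*
 * Worked example with hypothetical numbers: if cachelsz is 32 and the
 * buffer's bus address ends in 0x14, then off = 20 and 12 bytes are
 * reserved, rounding the DMA address up to the next 32-byte boundary.
 */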

static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct io_buffer *iob = bf->iob;
	struct ath5k_desc *ds;

	if (!iob) {
		iob = ath5k_rx_iob_alloc(sc, &bf->iobaddr);
		if (!iob)
			return -ENOMEM;
		bf->iob = iob;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr; /* link to self */
	ds->ds_data = bf->iobaddr;
	if (ah->ah_setup_rx_desc(ah, ds,
				 iob_tailroom(iob), /* buffer size */
				 0) != 0) {
		DBG("ath5k: error setting up RX descriptor for %d bytes\n",
		    iob_tailroom(iob));
		return -EINVAL;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}

static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq = &sc->txq;
	struct ath5k_desc *ds = bf->desc;
	struct io_buffer *iob = bf->iob;
	unsigned int pktlen, flags;
	int ret;
	u16 duration = 0;
	u16 cts_rate = 0;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
	bf->iobaddr = virt_to_bus(iob->data);
	pktlen = iob_len(iob);

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (sc->dev->phy_flags & NET80211_PHY_USE_PROTECTION) {
		struct net80211_device *dev = sc->dev;

		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = sc->hw_rtscts_rate;
		duration = net80211_cts_duration(dev, pktlen);
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
				   IEEE80211_TYP_FRAME_HEADER_LEN,
				   AR5K_PKT_TYPE_NORMAL, sc->power_level * 2,
				   sc->hw_rate, ATH5K_RETRIES,
				   AR5K_TXKEYIX_INVALID, 0, flags,
				   cts_rate, duration);
	if (ret)
		return ret;

	ds->ds_link = 0;
	ds->ds_data = bf->iobaddr;

	list_add_tail(&bf->list, &txq->q);
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mb();

	return 0;
}
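
/*
 * The TX queue is a hardware-walked linked list: the first buffer queued
 * on an idle queue is written to the TXDP register, and each later buffer
 * is attached by patching ds_link of the previous tail before the DMA
 * engine is (re)started; the mb() flushes the descriptor writes.
 */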

/*******************\
* Descriptors setup *
\*******************/

static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	u32 da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
	sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
	if (sc->desc == NULL) {
		DBG("ath5k: can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	memset(sc->desc, 0, sc->desc_len);
	sc->desc_daddr = virt_to_bus(sc->desc);

	ds = sc->desc;
	da = sc->desc_daddr;

	bf = calloc(ATH_TXBUF + ATH_RXBUF + 1, sizeof(struct ath5k_buf));
	if (bf == NULL) {
		DBG("ath5k: can't allocate buffer pointers\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	return 0;

err_free:
	free_dma(sc->desc, sc->desc_len);
err:
	sc->desc = NULL;
	return ret;
}
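
/*
 * A single contiguous DMA block holds ATH_TXBUF + ATH_RXBUF + 1 hardware
 * descriptors, paired one-to-one with a calloc'd array of struct ath5k_buf
 * software state; each bf->desc/bf->daddr records the virtual and bus
 * address of its hardware descriptor.
 */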

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free(sc, bf);

	/* Free memory associated with all descriptors */
	free_dma(sc->desc, sc->desc_len);

	free(sc->bufptr);
	sc->bufptr = NULL;
}




/**************\
* Queues setup *
\**************/

static int
ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_max = AR5K_TXQ_USEDEFAULT
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
		       AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		DBG("ath5k: can't set up a TX queue\n");
		return -EIO;
	}

	txq = &sc->txq;
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		txq->setup = 1;
	}
	return 0;
}
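
/*
 * Unlike the full Linux driver, which sets up several hardware transmit
 * queues, this port keeps a single data queue in sc->txq; a repeated
 * call would simply reuse the already set-up slot.
 */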

static void
ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_buf *bf, *bf0;

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ath5k_txbuf_free(sc, bf);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}
	txq->link = NULL;
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath5k_txq_cleanup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (!(sc->status & ATH_STAT_INVALID)) {
		/* don't touch the hardware if marked invalid */
		if (sc->txq.setup) {
			ath5k_hw_stop_tx_dma(ah, sc->txq.qnum);
			DBG("ath5k: txq [%d] %x, link %p\n",
			    sc->txq.qnum,
			    ath5k_hw_get_txdp(ah, sc->txq.qnum),
			    sc->txq.link);
		}
	}

	if (sc->txq.setup)
		ath5k_txq_drainq(sc, &sc->txq);
}

static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	if (sc->txq.setup) {
		ath5k_hw_release_tx_queue(sc->ah);
		sc->txq.setup = 0;
	}
}




/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_buf *bf;
	int ret;

	sc->rxbufsize = IEEE80211_MAX_LEN;
	if (sc->rxbufsize % sc->cachelsz != 0)
		sc->rxbufsize += sc->cachelsz - (sc->rxbufsize % sc->cachelsz);

	sc->rxlink = NULL;

	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0)
			return ret;
	}

	bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);

	ath5k_hw_set_rxdp(ah, bf->daddr);
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_mode_setup(sc);		/* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */

	sc->rxlink = NULL;		/* just in case */
}

static void
ath5k_handle_rx(struct ath5k_softc *sc)
{
	struct ath5k_rx_status rs;
	struct io_buffer *iob, *next_iob;
	u32 next_iob_addr;
	struct ath5k_buf *bf, *bf_last;
	struct ath5k_desc *ds;
	int ret;

	memset(&rs, 0, sizeof(rs));

	if (list_empty(&sc->rxbuf)) {
		DBG("ath5k: empty rx buf pool\n");
		return;
	}

	bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);

	do {
		bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);
		assert(bf->iob != NULL);
		iob = bf->iob;
		ds = bf->desc;

		/*
		 * The last buffer must not be freed, so that the hardware
		 * always has somewhere to write. Once the hardware has
		 * also completed the packet following it, we know it no
		 * longer uses this one and can process it.
		 */
		if (bf_last == bf)
			bf->flags |= 1;
		if (bf->flags) {
			struct ath5k_buf *bf_next = list_entry(bf->list.next,
							       struct ath5k_buf,
							       list);
			ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
						      &rs);
			if (ret)
				break;
			bf->flags &= ~1;
			/* skip the overwritten one (even status is martian) */
			goto next;
		}

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing rx desc: %s\n",
				    strerror(ret));
				net80211_rx_err(sc->dev, NULL, -ret);
			} else {
				/* normal return, reached end of
				   available descriptors */
			}
			return;
		}

		if (rs.rs_more) {
			DBG("ath5k: unsupported fragmented rx\n");
			goto next;
		}

		if (rs.rs_status) {
			if (rs.rs_status & AR5K_RXERR_PHY) {
				DBG("ath5k: rx PHY error\n");
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_CRC) {
				net80211_rx_err(sc->dev, NULL, EIO);
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it. This is necessary for 5210
				 * parts which have no way to set up a
				 * ``clear'' key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
				    !(rs.rs_status & AR5K_RXERR_CRC))
					goto accept;
			}

			/* any other error, unhandled */
			DBG("ath5k: packet rx status %x\n", rs.rs_status);
			goto next;
		}
accept:
		next_iob = ath5k_rx_iob_alloc(sc, &next_iob_addr);

		/*
		 * If we can't replace bf->iob with a new iob under memory
		 * pressure, just skip this packet
		 */
		if (!next_iob) {
			DBG("ath5k: dropping packet under memory pressure\n");
			goto next;
		}

		iob_put(iob, rs.rs_datalen);

		/* The MAC header is padded to a 32-bit boundary if the
		 * packet payload is non-empty. However, gPXE only
		 * supports standard 802.11 packets with a 24-byte
		 * header, so no padding correction should be needed.
		 */

		DBG2("ath5k: rx %d bytes, signal %d\n", rs.rs_datalen,
		     rs.rs_rssi);

		net80211_rx(sc->dev, iob, rs.rs_rssi,
			    ath5k_hw_rix_to_bitrate(rs.rs_rate));

		bf->iob = next_iob;
		bf->iobaddr = next_iob_addr;
next:
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
}
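
/*
 * The loop above drains completed descriptors until ah_proc_rx_desc()
 * reports -EINPROGRESS; each buffer, whether delivered or skipped, is
 * moved to the tail of sc->rxbuf and immediately rearmed by the
 * ath5k_rxbuf_setup() call in the loop condition.
 */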




/*************\
* TX Handling *
\*************/

static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts;
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct io_buffer *iob;
	int ret;

	memset(&ts, 0, sizeof(ts));

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ds = bf->desc;

		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing tx desc: %s\n",
				    strerror(ret));
			} else {
				/* normal return, reached end of
				   tx completions */
			}
			break;
		}

		iob = bf->iob;
		bf->iob = NULL;

		DBG2("ath5k: tx %d bytes complete, %d retries\n",
		     iob_len(iob), ts.ts_retry[0]);

		net80211_tx_complete(sc->dev, iob, ts.ts_retry[0],
				     ts.ts_status ? EIO : 0);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}

	if (list_empty(&txq->q))
		txq->link = NULL;
}

static void
ath5k_handle_tx(struct ath5k_softc *sc)
{
	ath5k_tx_processq(sc, &sc->txq);
}


/********************\
* Interrupt handling *
\********************/

static void
ath5k_irq(struct net80211_device *dev, int enable)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;

	sc->irq_ena = enable;
	ah->ah_ier = enable ? AR5K_IER_ENABLE : AR5K_IER_DISABLE;

	ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
	ath5k_hw_set_imr(ah, sc->imask);
}

static int
ath5k_init(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	int ret, i;

	/*
	 * Stop anything previously set up. This is safe
	 * whether this is the first time through or not.
	 */
	ath5k_stop_hw(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->dev->channels + sc->dev->channel;
	sc->curband = sc->curchan->band;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		    AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		    AR5K_INT_FATAL | AR5K_INT_GLOBAL;
	ret = ath5k_reset(sc, NULL);
	if (ret)
		goto done;

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
		ath5k_hw_reset_key(ah, i);

	/* Set ack to be sent at low bit-rates */
	ath5k_hw_set_ack_bitrate_high(ah, 0);

	ret = 0;
done:
	mb();
	return ret;
}

static int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */

	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_hw_set_imr(ah, 0);
	}
	ath5k_txq_cleanup(sc);
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_rx_stop(sc);
		ath5k_hw_phy_disable(ah);
	} else
		sc->rxlink = NULL;

	return 0;
}

static void
ath5k_poll(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (currticks() - sc->last_calib_ticks >
	    ATH5K_CALIB_INTERVAL * ticks_per_sec()) {
		ath5k_calibrate(sc);
		sc->last_calib_ticks = currticks();
	}

	if ((sc->status & ATH_STAT_INVALID) ||
	    (sc->irq_ena && !ath5k_hw_is_intr_pending(ah)))
		return;

	do {
		ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
		DBGP("ath5k: status %#x/%#x\n", status, sc->imask);
		if (status & AR5K_INT_FATAL) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			DBG("ath5k: fatal error, resetting\n");
			ath5k_reset_wake(sc);
		} else if (status & AR5K_INT_RXORN) {
			DBG("ath5k: rx overrun, resetting\n");
			ath5k_reset_wake(sc);
		} else {
			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link
				 * when the RXE bit is written, but it doesn't
				 * work, at least on older hardware revs.
				 */
				DBG("ath5k: rx EOL\n");
				sc->rxlink = NULL;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				DBG("ath5k: tx underrun\n");
				ath5k_hw_update_tx_triglevel(ah, 1);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_handle_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
				      | AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_handle_tx(sc);
		}
	} while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);

	if (!counter)
		DBG("ath5k: too many interrupts, giving up for now\n");
}
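
/*
 * gPXE drivers are polled rather than interrupt-driven, so this routine
 * plays the role of the Linux driver's interrupt handler: it reads and
 * clears the ISR in a loop, with the counter bounding how many pending
 * interrupt batches a single poll may service.
 */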

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_calibrate(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		DBG("ath5k: resetting for calibration\n");
		ath5k_reset_wake(sc);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		DBG("ath5k: calibration of channel %d failed\n",
		    sc->curchan->channel_nr);
}


/********************\
* Net80211 functions *
\********************/

static int
ath5k_tx(struct net80211_device *dev, struct io_buffer *iob)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_buf *bf;
	int rc;

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * gPXE only ever sends 24-byte headers, so no action necessary.
	 */

	if (list_empty(&sc->txbuf)) {
		DBG("ath5k: dropping packet because no tx bufs available\n");
		return -ENOBUFS;
	}

	bf = list_entry(sc->txbuf.next, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;

	bf->iob = iob;

	if ((rc = ath5k_txbuf_setup(sc, bf)) != 0) {
		bf->iob = NULL;
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		return rc;
	}
	return 0;
}

/*
 * Reset the hardware. If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	struct ath5k_hw *ah = sc->ah;
	int ret;

	if (chan) {
		ath5k_hw_set_imr(ah, 0);
		ath5k_txq_cleanup(sc);
		ath5k_rx_stop(sc);

		sc->curchan = chan;
		sc->curband = chan->band;
	}

	ret = ath5k_hw_reset(ah, sc->curchan, 1);
	if (ret) {
		DBG("ath5k: can't reset hardware: %s\n", strerror(ret));
		return ret;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		DBG("ath5k: can't start rx logic: %s\n", strerror(ret));
		return ret;
	}

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
	/* ath5k_chan_change(sc, c); */

	/* Reenable interrupts if necessary */
	ath5k_irq(sc->dev, sc->irq_ena);

	return 0;
}

static int ath5k_reset_wake(struct ath5k_softc *sc)
{
	return ath5k_reset(sc, sc->curchan);
}

static int ath5k_start(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	int ret;

	if ((ret = ath5k_init(sc)) != 0)
		return ret;

	sc->assoc = 0;
	ath5k_configure_filter(sc);
	ath5k_hw_set_lladdr(sc->ah, dev->netdev->ll_addr);

	return 0;
}

static void ath5k_stop(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	u8 mac[ETH_ALEN] = {};

	ath5k_hw_set_lladdr(sc->ah, mac);

	ath5k_stop_hw(sc);
}

static int
ath5k_config(struct net80211_device *dev, int changed)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	struct net80211_channel *chan = &dev->channels[dev->channel];
	int ret;

	if (changed & NET80211_CFG_CHANNEL) {
		sc->power_level = chan->maxpower;
		if ((ret = ath5k_chan_set(sc, chan)) != 0)
			return ret;
	}

	if ((changed & NET80211_CFG_RATE) ||
	    (changed & NET80211_CFG_PHY_PARAMS)) {
		int spmbl = ATH5K_SPMBL_NO;
		u16 rate = dev->rates[dev->rate];
		u16 slowrate = dev->rates[dev->rtscts_rate];
		int i;

		if (dev->phy_flags & NET80211_PHY_USE_SHORT_PREAMBLE)
			spmbl = ATH5K_SPMBL_YES;

		for (i = 0; i < ATH5K_NR_RATES; i++) {
			if (ath5k_rates[i].bitrate == rate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rate = ath5k_rates[i].hw_code;

			if (ath5k_rates[i].bitrate == slowrate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rtscts_rate = ath5k_rates[i].hw_code;
		}
	}

	if (changed & NET80211_CFG_ASSOC) {
		sc->assoc = !!(dev->state & NET80211_ASSOCIATED);
		if (sc->assoc) {
			memcpy(ah->ah_bssid, dev->bssid, ETH_ALEN);
		} else {
			memset(ah->ah_bssid, 0xff, ETH_ALEN);
		}
		ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
	}

	return 0;
}

/*
 * o always accept unicast, broadcast, and multicast traffic
 * o multicast traffic for all BSSIDs will be enabled if mac80211
 *   says it should be
 * o maintain current state of phy ofdm or phy cck error reception.
 *   If the hardware detects any of these types of errors then
 *   ath5k_hw_get_rx_filter() will pass to us the respective
 *   hardware filters to be able to receive these types of frames.
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static void ath5k_configure_filter(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 mfilt[2], rfilt;

	/* Enable all multicast */
	mfilt[0] = ~0;
	mfilt[1] = ~0;

	/* Enable data frames and beacons */
	rfilt = (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
		 AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_BEACON);

	/* Set filters */
	ath5k_hw_set_rx_filter(ah, rfilt);

	/* Set multicast bits */
	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);

	/* Cache the hw filter flags; these mirror what was
	 * actually set in HW */
	sc->filter_flags = rfilt;
}