Catch ipw2200 up to equivalency with v1.0.2
drivers/net/wireless/ipw2200.c
43f66a6c 1/******************************************************************************
bf79451e 2
43f66a6c
JK
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
bf79451e
JG
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
43f66a6c 13 published by the Free Software Foundation.
bf79451e
JG
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
43f66a6c 18 more details.
bf79451e 19
43f66a6c 20 You should have received a copy of the GNU General Public License along with
bf79451e 21 this program; if not, write to the Free Software Foundation, Inc., 59
43f66a6c 22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
bf79451e 23
43f66a6c
JK
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
bf79451e 26
43f66a6c
JK
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include "ipw2200.h"
34
a613bffd 35#define IPW2200_VERSION "1.0.2"
43f66a6c
JK
36#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
37#define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
38#define DRV_VERSION IPW2200_VERSION
39
40MODULE_DESCRIPTION(DRV_DESCRIPTION);
41MODULE_VERSION(DRV_VERSION);
42MODULE_AUTHOR(DRV_COPYRIGHT);
43MODULE_LICENSE("GPL");
44
45static int debug = 0;
46static int channel = 0;
43f66a6c
JK
47static int mode = 0;
48
49static u32 ipw_debug_level;
50static int associate = 1;
51static int auto_create = 1;
a613bffd 52static int led = 0;
43f66a6c
JK
53static int disable = 0;
54static const char ipw_modes[] = {
55 'a', 'b', 'g', '?'
56};
57
58static void ipw_rx(struct ipw_priv *priv);
bf79451e 59static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
43f66a6c
JK
60 struct clx2_tx_queue *txq, int qindex);
61static int ipw_queue_reset(struct ipw_priv *priv);
62
63static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
64 int len, int sync);
65
66static void ipw_tx_queue_free(struct ipw_priv *);
67
68static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
69static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
70static void ipw_rx_queue_replenish(void *);
71
72static int ipw_up(struct ipw_priv *);
73static void ipw_down(struct ipw_priv *);
74static int ipw_config(struct ipw_priv *);
0edd5b44
JG
75static int init_supported_rates(struct ipw_priv *priv,
76 struct ipw_supported_rates *prates);
43f66a6c
JK
77
78static u8 band_b_active_channel[MAX_B_CHANNELS] = {
79 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
80};
81static u8 band_a_active_channel[MAX_A_CHANNELS] = {
82 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
83};
84
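/* Return the subset of mode_mask (IEEE_A, or IEEE_B | IEEE_G) on which
 * 'channel' is a valid channel, or 0 if the channel is not valid. */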
85static int is_valid_channel(int mode_mask, int channel)
86{
87 int i;
88
89 if (!channel)
90 return 0;
91
92 if (mode_mask & IEEE_A)
93 for (i = 0; i < MAX_A_CHANNELS; i++)
94 if (band_a_active_channel[i] == channel)
95 return IEEE_A;
96
97 if (mode_mask & (IEEE_B | IEEE_G))
98 for (i = 0; i < MAX_B_CHANNELS; i++)
99 if (band_b_active_channel[i] == channel)
100 return mode_mask & (IEEE_B | IEEE_G);
101
102 return 0;
103}
104
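/* Format up to 16 bytes of 'data' (starting at offset 'ofs') as one
 * hex + printable-ASCII dump line in 'buf'. */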
bf79451e 105static char *snprint_line(char *buf, size_t count,
0edd5b44 106 const u8 * data, u32 len, u32 ofs)
43f66a6c
JK
107{
108 int out, i, j, l;
109 char c;
bf79451e 110
43f66a6c
JK
111 out = snprintf(buf, count, "%08X", ofs);
112
113 for (l = 0, i = 0; i < 2; i++) {
114 out += snprintf(buf + out, count - out, " ");
bf79451e
JG
115 for (j = 0; j < 8 && l < len; j++, l++)
116 out += snprintf(buf + out, count - out, "%02X ",
43f66a6c
JK
117 data[(i * 8 + j)]);
118 for (; j < 8; j++)
119 out += snprintf(buf + out, count - out, " ");
120 }
bf79451e 121
43f66a6c
JK
122 out += snprintf(buf + out, count - out, " ");
123 for (l = 0, i = 0; i < 2; i++) {
124 out += snprintf(buf + out, count - out, " ");
125 for (j = 0; j < 8 && l < len; j++, l++) {
126 c = data[(i * 8 + j)];
127 if (!isascii(c) || !isprint(c))
128 c = '.';
bf79451e 129
43f66a6c
JK
130 out += snprintf(buf + out, count - out, "%c", c);
131 }
132
133 for (; j < 8; j++)
134 out += snprintf(buf + out, count - out, " ");
135 }
bf79451e 136
43f66a6c
JK
137 return buf;
138}
139
0edd5b44 140static void printk_buf(int level, const u8 * data, u32 len)
43f66a6c
JK
141{
142 char line[81];
143 u32 ofs = 0;
144 if (!(ipw_debug_level & level))
145 return;
146
147 while (len) {
148 printk(KERN_DEBUG "%s\n",
bf79451e 149 snprint_line(line, sizeof(line), &data[ofs],
43f66a6c
JK
150 min(len, 16U), ofs));
151 ofs += 16;
152 len -= min(len, 16U);
153 }
154}
155
156static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
157#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
158
159static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
160#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
161
162static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
163static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
164{
0edd5b44
JG
165 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
166 __LINE__, (u32) (b), (u32) (c));
43f66a6c
JK
167 _ipw_write_reg8(a, b, c);
168}
169
170static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
171static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
172{
0edd5b44
JG
173 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
174 __LINE__, (u32) (b), (u32) (c));
43f66a6c
JK
175 _ipw_write_reg16(a, b, c);
176}
177
178static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
179static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
180{
0edd5b44
JG
181 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
182 __LINE__, (u32) (b), (u32) (c));
43f66a6c
JK
183 _ipw_write_reg32(a, b, c);
184}
185
186#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
187#define ipw_write8(ipw, ofs, val) \
188 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
189 _ipw_write8(ipw, ofs, val)
190
191#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
192#define ipw_write16(ipw, ofs, val) \
193 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
194 _ipw_write16(ipw, ofs, val)
195
196#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
197#define ipw_write32(ipw, ofs, val) \
198 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
199 _ipw_write32(ipw, ofs, val)
200
201#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
0edd5b44
JG
202static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
203{
204 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
43f66a6c
JK
205 return _ipw_read8(ipw, ofs);
206}
0edd5b44 207
43f66a6c
JK
208#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
209
210#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
0edd5b44
JG
211static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
212{
213 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
43f66a6c
JK
214 return _ipw_read16(ipw, ofs);
215}
0edd5b44 216
43f66a6c
JK
217#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
218
219#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
0edd5b44
JG
220static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
221{
222 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
43f66a6c
JK
223 return _ipw_read32(ipw, ofs);
224}
0edd5b44 225
43f66a6c
JK
226#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
227
228static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
229#define ipw_read_indirect(a, b, c, d) \
 230 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
231 _ipw_read_indirect(a, b, c, d)
232
0edd5b44
JG
233static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
234 int num);
43f66a6c
JK
235#define ipw_write_indirect(a, b, c, d) \
236 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
237 _ipw_write_indirect(a, b, c, d)
238
 239/* indirect writes */
0edd5b44 240static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
43f66a6c 241{
0edd5b44 242 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
43f66a6c
JK
243 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
244 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
245}
246
43f66a6c
JK
247static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
248{
249 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
250 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
251 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
bf79451e 252 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
0edd5b44 253 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);
43f66a6c
JK
254}
255
0edd5b44 256static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
43f66a6c
JK
257{
258 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
259 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
260 _ipw_write16(priv, CX2_INDIRECT_DATA, value);
261}
262
 263/* indirect reads */
264
265static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
266{
267 u32 word;
268 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
269 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
270 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
0edd5b44 271 return (word >> ((reg & 0x3) * 8)) & 0xff;
43f66a6c
JK
272}
273
274static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
275{
276 u32 value;
277
278 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
279
280 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
281 value = _ipw_read32(priv, CX2_INDIRECT_DATA);
282 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
283 return value;
284}
285
286/* iterative/auto-increment 32 bit reads and writes */
287static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
288 int num)
289{
290 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
291 u32 dif_len = addr - aligned_addr;
43f66a6c 292 u32 i;
bf79451e 293
43f66a6c
JK
294 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
295
ea2b26e0
JK
296 if (num <= 0) {
297 return;
298 }
299
43f66a6c
JK
 300 /* Read the first dword (or portion of it) byte by byte */
301 if (unlikely(dif_len)) {
43f66a6c 302 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
ea2b26e0
JK
303 /* Start reading at aligned_addr + dif_len */
304 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
305 *buf++ = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
43f66a6c
JK
306 aligned_addr += 4;
307 }
308
43f66a6c 309 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
ea2b26e0
JK
310 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
311 *(u32 *) buf = _ipw_read32(priv, CX2_AUTOINC_DATA);
bf79451e 312
43f66a6c 313 /* Copy the last dword (or portion of it) byte by byte */
ea2b26e0
JK
314 if (unlikely(num)) {
315 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
316 for (i = 0; num > 0; i++, num--)
317 *buf++ = ipw_read8(priv, CX2_INDIRECT_DATA + i);
318 }
43f66a6c
JK
319}
320
0edd5b44 321static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
43f66a6c
JK
322 int num)
323{
324 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
325 u32 dif_len = addr - aligned_addr;
43f66a6c 326 u32 i;
bf79451e 327
43f66a6c 328 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
bf79451e 329
ea2b26e0
JK
330 if (num <= 0) {
331 return;
332 }
333
43f66a6c
JK
 334 /* Write the first dword (or portion of it) byte by byte */
335 if (unlikely(dif_len)) {
43f66a6c 336 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
ea2b26e0
JK
 337 /* Start writing at aligned_addr + dif_len */
338 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
43f66a6c 339 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
43f66a6c
JK
340 aligned_addr += 4;
341 }
bf79451e 342
43f66a6c 343 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
ea2b26e0 344 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
0edd5b44 345 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
bf79451e 346
43f66a6c 347 /* Copy the last dword (or portion of it) byte by byte */
ea2b26e0
JK
348 if (unlikely(num)) {
349 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
350 for (i = 0; num > 0; i++, num--, buf++)
351 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
352 }
43f66a6c
JK
353}
354
bf79451e 355static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
43f66a6c
JK
356 int num)
357{
358 memcpy_toio((priv->hw_base + addr), buf, num);
359}
360
361static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
362{
363 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
364}
365
366static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
367{
368 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
369}
370
371static inline void ipw_enable_interrupts(struct ipw_priv *priv)
372{
373 if (priv->status & STATUS_INT_ENABLED)
374 return;
375 priv->status |= STATUS_INT_ENABLED;
376 ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
377}
378
379static inline void ipw_disable_interrupts(struct ipw_priv *priv)
380{
381 if (!(priv->status & STATUS_INT_ENABLED))
382 return;
383 priv->status &= ~STATUS_INT_ENABLED;
384 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
385}
386
387static char *ipw_error_desc(u32 val)
388{
389 switch (val) {
bf79451e 390 case IPW_FW_ERROR_OK:
43f66a6c 391 return "ERROR_OK";
bf79451e 392 case IPW_FW_ERROR_FAIL:
43f66a6c 393 return "ERROR_FAIL";
bf79451e 394 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
43f66a6c 395 return "MEMORY_UNDERFLOW";
bf79451e 396 case IPW_FW_ERROR_MEMORY_OVERFLOW:
43f66a6c 397 return "MEMORY_OVERFLOW";
bf79451e 398 case IPW_FW_ERROR_BAD_PARAM:
43f66a6c 399 return "ERROR_BAD_PARAM";
bf79451e 400 case IPW_FW_ERROR_BAD_CHECKSUM:
43f66a6c 401 return "ERROR_BAD_CHECKSUM";
bf79451e 402 case IPW_FW_ERROR_NMI_INTERRUPT:
43f66a6c 403 return "ERROR_NMI_INTERRUPT";
bf79451e 404 case IPW_FW_ERROR_BAD_DATABASE:
43f66a6c 405 return "ERROR_BAD_DATABASE";
bf79451e 406 case IPW_FW_ERROR_ALLOC_FAIL:
43f66a6c 407 return "ERROR_ALLOC_FAIL";
bf79451e 408 case IPW_FW_ERROR_DMA_UNDERRUN:
43f66a6c 409 return "ERROR_DMA_UNDERRUN";
bf79451e 410 case IPW_FW_ERROR_DMA_STATUS:
43f66a6c 411 return "ERROR_DMA_STATUS";
bf79451e 412 case IPW_FW_ERROR_DINOSTATUS_ERROR:
43f66a6c 413 return "ERROR_DINOSTATUS_ERROR";
bf79451e 414 case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
43f66a6c 415 return "ERROR_EEPROMSTATUS_ERROR";
bf79451e 416 case IPW_FW_ERROR_SYSASSERT:
43f66a6c 417 return "ERROR_SYSASSERT";
bf79451e 418 case IPW_FW_ERROR_FATAL_ERROR:
43f66a6c 419 return "ERROR_FATALSTATUS_ERROR";
bf79451e 420 default:
43f66a6c
JK
421 return "UNKNOWNSTATUS_ERROR";
422 }
423}
424
425static void ipw_dump_nic_error_log(struct ipw_priv *priv)
426{
427 u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
428
429 base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
430 count = ipw_read_reg32(priv, base);
bf79451e 431
43f66a6c
JK
432 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
433 IPW_ERROR("Start IPW Error Log Dump:\n");
434 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
435 priv->status, priv->config);
436 }
437
bf79451e 438 for (i = ERROR_START_OFFSET;
0edd5b44
JG
439 i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
440 desc = ipw_read_reg32(priv, base + i);
441 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
442 blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
443 blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
444 ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
445 ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
446 idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
43f66a6c 447
0edd5b44
JG
448 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
449 ipw_error_desc(desc), time, blink1, blink2,
450 ilink1, ilink2, idata);
43f66a6c
JK
451 }
452}
453
454static void ipw_dump_nic_event_log(struct ipw_priv *priv)
455{
456 u32 ev, time, data, i, count, base;
457
458 base = ipw_read32(priv, IPW_EVENT_LOG);
459 count = ipw_read_reg32(priv, base);
bf79451e 460
43f66a6c
JK
461 if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
462 IPW_ERROR("Start IPW Event Log Dump:\n");
463
bf79451e 464 for (i = EVENT_START_OFFSET;
0edd5b44 465 i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
43f66a6c 466 ev = ipw_read_reg32(priv, base + i);
0edd5b44
JG
467 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
468 data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
43f66a6c
JK
469
470#ifdef CONFIG_IPW_DEBUG
471 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
472#endif
473 }
474}
475
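/* Fetch the value of ordinal 'ord' from the firmware ordinal tables into
 * 'val'; on entry *len is the buffer size, on exit it is the length of the
 * data actually returned. */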
0edd5b44 476static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
43f66a6c
JK
477{
478 u32 addr, field_info, field_len, field_count, total_len;
479
480 IPW_DEBUG_ORD("ordinal = %i\n", ord);
481
482 if (!priv || !val || !len) {
483 IPW_DEBUG_ORD("Invalid argument\n");
484 return -EINVAL;
485 }
bf79451e 486
43f66a6c
JK
487 /* verify device ordinal tables have been initialized */
488 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
489 IPW_DEBUG_ORD("Access ordinals before initialization\n");
490 return -EINVAL;
491 }
492
493 switch (IPW_ORD_TABLE_ID_MASK & ord) {
494 case IPW_ORD_TABLE_0_MASK:
495 /*
496 * TABLE 0: Direct access to a table of 32 bit values
497 *
bf79451e 498 * This is a very simple table with the data directly
43f66a6c
JK
499 * read from the table
500 */
501
502 /* remove the table id from the ordinal */
503 ord &= IPW_ORD_TABLE_VALUE_MASK;
504
505 /* boundary check */
506 if (ord > priv->table0_len) {
 507 IPW_DEBUG_ORD("ordinal value (%i) larger than "
508 "max (%i)\n", ord, priv->table0_len);
509 return -EINVAL;
510 }
511
512 /* verify we have enough room to store the value */
513 if (*len < sizeof(u32)) {
514 IPW_DEBUG_ORD("ordinal buffer length too small, "
aaa4d308 515 "need %zd\n", sizeof(u32));
43f66a6c
JK
516 return -EINVAL;
517 }
518
519 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
0edd5b44 520 ord, priv->table0_addr + (ord << 2));
43f66a6c
JK
521
522 *len = sizeof(u32);
523 ord <<= 2;
0edd5b44 524 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
43f66a6c
JK
525 break;
526
527 case IPW_ORD_TABLE_1_MASK:
528 /*
529 * TABLE 1: Indirect access to a table of 32 bit values
bf79451e
JG
530 *
531 * This is a fairly large table of u32 values each
43f66a6c
JK
532 * representing starting addr for the data (which is
533 * also a u32)
534 */
535
536 /* remove the table id from the ordinal */
537 ord &= IPW_ORD_TABLE_VALUE_MASK;
bf79451e 538
43f66a6c
JK
539 /* boundary check */
540 if (ord > priv->table1_len) {
 541 IPW_DEBUG_ORD("ordinal value too large\n");
542 return -EINVAL;
543 }
544
545 /* verify we have enough room to store the value */
546 if (*len < sizeof(u32)) {
547 IPW_DEBUG_ORD("ordinal buffer length too small, "
aaa4d308 548 "need %zd\n", sizeof(u32));
43f66a6c
JK
549 return -EINVAL;
550 }
551
0edd5b44
JG
552 *((u32 *) val) =
553 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
43f66a6c
JK
554 *len = sizeof(u32);
555 break;
556
557 case IPW_ORD_TABLE_2_MASK:
558 /*
559 * TABLE 2: Indirect access to a table of variable sized values
560 *
 561 * This table consists of six values, each containing
562 * - dword containing the starting offset of the data
 563 * - dword containing the length in the first 16bits
564 * and the count in the second 16bits
565 */
566
567 /* remove the table id from the ordinal */
568 ord &= IPW_ORD_TABLE_VALUE_MASK;
569
570 /* boundary check */
571 if (ord > priv->table2_len) {
 572 IPW_DEBUG_ORD("ordinal value too large\n");
573 return -EINVAL;
574 }
575
576 /* get the address of statistic */
577 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
bf79451e
JG
578
579 /* get the second DW of statistics ;
43f66a6c 580 * two 16-bit words - first is length, second is count */
0edd5b44
JG
581 field_info =
582 ipw_read_reg32(priv,
583 priv->table2_addr + (ord << 3) +
584 sizeof(u32));
bf79451e 585
43f66a6c 586 /* get each entry length */
0edd5b44 587 field_len = *((u16 *) & field_info);
bf79451e 588
43f66a6c 589 /* get number of entries */
0edd5b44 590 field_count = *(((u16 *) & field_info) + 1);
bf79451e 591
43f66a6c
JK
 592 /* abort if not enough memory */
593 total_len = field_len * field_count;
594 if (total_len > *len) {
595 *len = total_len;
596 return -EINVAL;
597 }
bf79451e 598
43f66a6c
JK
599 *len = total_len;
600 if (!total_len)
601 return 0;
602
603 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
bf79451e 604 "field_info = 0x%08x\n",
43f66a6c
JK
605 addr, total_len, field_info);
606 ipw_read_indirect(priv, addr, val, total_len);
607 break;
608
609 default:
610 IPW_DEBUG_ORD("Invalid ordinal!\n");
611 return -EINVAL;
612
613 }
614
43f66a6c
JK
615 return 0;
616}
617
618static void ipw_init_ordinals(struct ipw_priv *priv)
619{
620 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
bf79451e 621 priv->table0_len = ipw_read32(priv, priv->table0_addr);
43f66a6c
JK
622
623 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
624 priv->table0_addr, priv->table0_len);
625
626 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
627 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
628
629 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
630 priv->table1_addr, priv->table1_len);
631
632 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
633 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
0edd5b44 634 priv->table2_len &= 0x0000ffff; /* use lower 16 bits */
43f66a6c
JK
635
636 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
637 priv->table2_addr, priv->table2_len);
638
639}
640
a613bffd
JK
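/* Clear the standby bit and any DMA gating bits from an event-register
 * value before it is written back for LED control. */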
641u32 ipw_register_toggle(u32 reg)
642{
643 reg &= ~CX2_START_STANDBY;
644 if (reg & CX2_GATE_ODMA)
645 reg &= ~CX2_GATE_ODMA;
646 if (reg & CX2_GATE_IDMA)
647 reg &= ~CX2_GATE_IDMA;
648 if (reg & CX2_GATE_ADMA)
649 reg &= ~CX2_GATE_ADMA;
650 return reg;
651}
652
653/*
654 * LED behavior:
 655 * - On radio ON, turn on any LEDs that need to be on during start
656 * - On initialization, start unassociated blink
657 * - On association, disable unassociated blink
658 * - On disassociation, start unassociated blink
659 * - On radio OFF, turn off any LEDs started during radio on
660 *
661 */
662#define LD_TIME_LINK_ON 300
663#define LD_TIME_LINK_OFF 2700
664#define LD_TIME_ACT_ON 250
665
666void ipw_led_link_on(struct ipw_priv *priv)
667{
668 unsigned long flags;
669 u32 led;
670
671 /* If configured to not use LEDs, or nic_type is 1,
672 * then we don't toggle a LINK led */
673 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
674 return;
675
676 spin_lock_irqsave(&priv->lock, flags);
677
678 if (!(priv->status & STATUS_RF_KILL_MASK) &&
679 !(priv->status & STATUS_LED_LINK_ON)) {
680 IPW_DEBUG_LED("Link LED On\n");
681 led = ipw_read_reg32(priv, CX2_EVENT_REG);
682 led |= priv->led_association_on;
683
684 led = ipw_register_toggle(led);
685
686 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
687 ipw_write_reg32(priv, CX2_EVENT_REG, led);
688
689 priv->status |= STATUS_LED_LINK_ON;
690
691 /* If we aren't associated, schedule turning the LED off */
692 if (!(priv->status & STATUS_ASSOCIATED))
693 queue_delayed_work(priv->workqueue,
694 &priv->led_link_off,
695 LD_TIME_LINK_ON);
696 }
697
698 spin_unlock_irqrestore(&priv->lock, flags);
699}
700
701void ipw_led_link_off(struct ipw_priv *priv)
702{
703 unsigned long flags;
704 u32 led;
705
706 /* If configured not to use LEDs, or nic type is 1,
 707 * then we don't toggle the LINK led. */
708 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
709 return;
710
711 spin_lock_irqsave(&priv->lock, flags);
712
713 if (priv->status & STATUS_LED_LINK_ON) {
714 led = ipw_read_reg32(priv, CX2_EVENT_REG);
715 led &= priv->led_association_off;
716 led = ipw_register_toggle(led);
717
718 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
719 ipw_write_reg32(priv, CX2_EVENT_REG, led);
720
721 IPW_DEBUG_LED("Link LED Off\n");
722
723 priv->status &= ~STATUS_LED_LINK_ON;
724
725 /* If we aren't associated and the radio is on, schedule
726 * turning the LED on (blink while unassociated) */
727 if (!(priv->status & STATUS_RF_KILL_MASK) &&
728 !(priv->status & STATUS_ASSOCIATED))
729 queue_delayed_work(priv->workqueue, &priv->led_link_on,
730 LD_TIME_LINK_OFF);
731
732 }
733
734 spin_unlock_irqrestore(&priv->lock, flags);
735}
736
737void ipw_led_activity_on(struct ipw_priv *priv)
738{
739 unsigned long flags;
740 u32 led;
741
742 if (priv->config & CFG_NO_LED)
743 return;
744
745 spin_lock_irqsave(&priv->lock, flags);
746
747 if (priv->status & STATUS_RF_KILL_MASK) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 return;
750 }
751
752 if (!(priv->status & STATUS_LED_ACT_ON)) {
753 led = ipw_read_reg32(priv, CX2_EVENT_REG);
754 led |= priv->led_activity_on;
755
756 led = ipw_register_toggle(led);
757
758 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
759 ipw_write_reg32(priv, CX2_EVENT_REG, led);
760
761 IPW_DEBUG_LED("Activity LED On\n");
762
763 priv->status |= STATUS_LED_ACT_ON;
764
765 queue_delayed_work(priv->workqueue, &priv->led_act_off,
766 LD_TIME_ACT_ON);
767 } else {
768 /* Reschedule LED off for full time period */
769 cancel_delayed_work(&priv->led_act_off);
770 queue_delayed_work(priv->workqueue, &priv->led_act_off,
771 LD_TIME_ACT_ON);
772 }
773
774 spin_unlock_irqrestore(&priv->lock, flags);
775}
776
777void ipw_led_activity_off(struct ipw_priv *priv)
778{
779 unsigned long flags;
780 u32 led;
781
782 if (priv->config & CFG_NO_LED)
783 return;
784
785 spin_lock_irqsave(&priv->lock, flags);
786
787 if (priv->status & STATUS_LED_ACT_ON) {
788 led = ipw_read_reg32(priv, CX2_EVENT_REG);
789 led &= priv->led_activity_off;
790
791 led = ipw_register_toggle(led);
792
793 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
794 ipw_write_reg32(priv, CX2_EVENT_REG, led);
795
796 IPW_DEBUG_LED("Activity LED Off\n");
797
798 priv->status &= ~STATUS_LED_ACT_ON;
799 }
800
801 spin_unlock_irqrestore(&priv->lock, flags);
802}
803
804void ipw_led_band_on(struct ipw_priv *priv)
805{
806 unsigned long flags;
807 u32 led;
808
809 /* Only nic type 1 supports mode LEDs */
810 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
811 return;
812
813 spin_lock_irqsave(&priv->lock, flags);
814
815 led = ipw_read_reg32(priv, CX2_EVENT_REG);
816 if (priv->assoc_network->mode == IEEE_A) {
817 led |= priv->led_ofdm_on;
818 led &= priv->led_association_off;
819 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
820 } else if (priv->assoc_network->mode == IEEE_G) {
821 led |= priv->led_ofdm_on;
822 led |= priv->led_association_on;
823 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
824 } else {
825 led &= priv->led_ofdm_off;
826 led |= priv->led_association_on;
827 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
828 }
829
830 led = ipw_register_toggle(led);
831
832 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
833 ipw_write_reg32(priv, CX2_EVENT_REG, led);
834
835 spin_unlock_irqrestore(&priv->lock, flags);
836}
837
838void ipw_led_band_off(struct ipw_priv *priv)
839{
840 unsigned long flags;
841 u32 led;
842
843 /* Only nic type 1 supports mode LEDs */
844 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
845 return;
846
847 spin_lock_irqsave(&priv->lock, flags);
848
849 led = ipw_read_reg32(priv, CX2_EVENT_REG);
850 led &= priv->led_ofdm_off;
851 led &= priv->led_association_off;
852
853 led = ipw_register_toggle(led);
854
855 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
856 ipw_write_reg32(priv, CX2_EVENT_REG, led);
857
858 spin_unlock_irqrestore(&priv->lock, flags);
859}
860
861void ipw_led_radio_on(struct ipw_priv *priv)
862{
863 ipw_led_link_on(priv);
864}
865
866void ipw_led_radio_off(struct ipw_priv *priv)
867{
868 ipw_led_activity_off(priv);
869 ipw_led_link_off(priv);
870}
871
872void ipw_led_link_up(struct ipw_priv *priv)
873{
874 /* Set the Link Led on for all nic types */
875 ipw_led_link_on(priv);
876}
877
878void ipw_led_link_down(struct ipw_priv *priv)
879{
880 ipw_led_activity_off(priv);
881 ipw_led_link_off(priv);
882
883 if (priv->status & STATUS_RF_KILL_MASK)
884 ipw_led_radio_off(priv);
885}
886
887void ipw_led_init(struct ipw_priv *priv)
888{
889 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
890
891 /* Set the default PINs for the link and activity leds */
892 priv->led_activity_on = CX2_ACTIVITY_LED;
893 priv->led_activity_off = ~(CX2_ACTIVITY_LED);
894
895 priv->led_association_on = CX2_ASSOCIATED_LED;
896 priv->led_association_off = ~(CX2_ASSOCIATED_LED);
897
898 /* Set the default PINs for the OFDM leds */
899 priv->led_ofdm_on = CX2_OFDM_LED;
900 priv->led_ofdm_off = ~(CX2_OFDM_LED);
901
902 switch (priv->nic_type) {
903 case EEPROM_NIC_TYPE_1:
904 /* In this NIC type, the LEDs are reversed.... */
905 priv->led_activity_on = CX2_ASSOCIATED_LED;
906 priv->led_activity_off = ~(CX2_ASSOCIATED_LED);
907 priv->led_association_on = CX2_ACTIVITY_LED;
908 priv->led_association_off = ~(CX2_ACTIVITY_LED);
909
910 if (!(priv->config & CFG_NO_LED))
911 ipw_led_band_on(priv);
912
913 /* And we don't blink link LEDs for this nic, so
914 * just return here */
915 return;
916
917 case EEPROM_NIC_TYPE_3:
918 case EEPROM_NIC_TYPE_2:
919 case EEPROM_NIC_TYPE_4:
920 case EEPROM_NIC_TYPE_0:
921 break;
922
923 default:
924 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
925 priv->nic_type);
926 priv->nic_type = EEPROM_NIC_TYPE_0;
927 break;
928 }
929
930 if (!(priv->config & CFG_NO_LED)) {
931 if (priv->status & STATUS_ASSOCIATED)
932 ipw_led_link_on(priv);
933 else
934 ipw_led_link_off(priv);
935 }
936}
937
938void ipw_led_shutdown(struct ipw_priv *priv)
939{
940 cancel_delayed_work(&priv->led_link_on);
941 cancel_delayed_work(&priv->led_link_off);
942 cancel_delayed_work(&priv->led_act_off);
943 ipw_led_activity_off(priv);
944 ipw_led_link_off(priv);
945 ipw_led_band_off(priv);
946}
947
43f66a6c
JK
948/*
949 * The following adds a new attribute to the sysfs representation
950 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 951 * used for controlling the debug level.
bf79451e 952 *
43f66a6c
JK
953 * See the level definitions in ipw for details.
954 */
955static ssize_t show_debug_level(struct device_driver *d, char *buf)
956{
957 return sprintf(buf, "0x%08X\n", ipw_debug_level);
958}
a613bffd
JK
959
960static ssize_t store_debug_level(struct device_driver *d, const char *buf,
961 size_t count)
43f66a6c
JK
962{
963 char *p = (char *)buf;
964 u32 val;
965
966 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
967 p++;
968 if (p[0] == 'x' || p[0] == 'X')
969 p++;
970 val = simple_strtoul(p, &p, 16);
971 } else
972 val = simple_strtoul(p, &p, 10);
bf79451e
JG
973 if (p == buf)
974 printk(KERN_INFO DRV_NAME
43f66a6c
JK
975 ": %s is not in hex or decimal form.\n", buf);
976 else
977 ipw_debug_level = val;
978
979 return strnlen(buf, count);
980}
981
bf79451e 982static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
43f66a6c
JK
983 show_debug_level, store_debug_level);
984
a613bffd
JK
985static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
986 char *buf)
987{
988 struct ipw_priv *priv = dev_get_drvdata(d);
989 return sprintf(buf, "%d\n", priv->ieee->scan_age);
990}
991
992static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
993 const char *buf, size_t count)
994{
995 struct ipw_priv *priv = dev_get_drvdata(d);
996 struct net_device *dev = priv->net_dev;
997 char buffer[] = "00000000";
998 unsigned long len =
999 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1000 unsigned long val;
1001 char *p = buffer;
1002
1003 IPW_DEBUG_INFO("enter\n");
1004
1005 strncpy(buffer, buf, len);
1006 buffer[len] = 0;
1007
1008 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1009 p++;
1010 if (p[0] == 'x' || p[0] == 'X')
1011 p++;
1012 val = simple_strtoul(p, &p, 16);
1013 } else
1014 val = simple_strtoul(p, &p, 10);
1015 if (p == buffer) {
1016 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1017 } else {
1018 priv->ieee->scan_age = val;
1019 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1020 }
1021
1022 IPW_DEBUG_INFO("exit\n");
1023 return len;
1024}
1025
1026static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1027
1028static ssize_t show_led(struct device *d, struct device_attribute *attr,
1029 char *buf)
1030{
1031 struct ipw_priv *priv = dev_get_drvdata(d);
1032 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1033}
1034
1035static ssize_t store_led(struct device *d, struct device_attribute *attr,
1036 const char *buf, size_t count)
1037{
1038 struct ipw_priv *priv = dev_get_drvdata(d);
1039
1040 IPW_DEBUG_INFO("enter\n");
1041
1042 if (count == 0)
1043 return 0;
1044
1045 if (*buf == 0) {
1046 IPW_DEBUG_LED("Disabling LED control.\n");
1047 priv->config |= CFG_NO_LED;
1048 ipw_led_shutdown(priv);
1049 } else {
1050 IPW_DEBUG_LED("Enabling LED control.\n");
1051 priv->config &= ~CFG_NO_LED;
1052 ipw_led_init(priv);
1053 }
1054
1055 IPW_DEBUG_INFO("exit\n");
1056 return count;
1057}
1058
1059static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1060
ad3fee56 1061static ssize_t show_status(struct device *d,
0edd5b44 1062 struct device_attribute *attr, char *buf)
43f66a6c 1063{
ad3fee56 1064 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1065 return sprintf(buf, "0x%08x\n", (int)p->status);
1066}
0edd5b44 1067
43f66a6c
JK
1068static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1069
ad3fee56
AM
1070static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1071 char *buf)
43f66a6c 1072{
ad3fee56 1073 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1074 return sprintf(buf, "0x%08x\n", (int)p->config);
1075}
0edd5b44 1076
43f66a6c
JK
1077static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1078
ad3fee56 1079static ssize_t show_nic_type(struct device *d,
0edd5b44 1080 struct device_attribute *attr, char *buf)
43f66a6c 1081{
a613bffd
JK
1082 struct ipw_priv *priv = d->driver_data;
1083 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
43f66a6c 1084}
0edd5b44 1085
43f66a6c
JK
1086static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1087
ad3fee56 1088static ssize_t dump_error_log(struct device *d,
0edd5b44
JG
1089 struct device_attribute *attr, const char *buf,
1090 size_t count)
43f66a6c
JK
1091{
1092 char *p = (char *)buf;
1093
bf79451e 1094 if (p[0] == '1')
0edd5b44 1095 ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
43f66a6c
JK
1096
1097 return strnlen(buf, count);
1098}
0edd5b44 1099
43f66a6c
JK
1100static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
1101
ad3fee56 1102static ssize_t dump_event_log(struct device *d,
0edd5b44
JG
1103 struct device_attribute *attr, const char *buf,
1104 size_t count)
43f66a6c
JK
1105{
1106 char *p = (char *)buf;
1107
bf79451e 1108 if (p[0] == '1')
0edd5b44 1109 ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
43f66a6c
JK
1110
1111 return strnlen(buf, count);
1112}
0edd5b44 1113
43f66a6c
JK
1114static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
1115
ad3fee56 1116static ssize_t show_ucode_version(struct device *d,
0edd5b44 1117 struct device_attribute *attr, char *buf)
43f66a6c
JK
1118{
1119 u32 len = sizeof(u32), tmp = 0;
ad3fee56 1120 struct ipw_priv *p = d->driver_data;
43f66a6c 1121
0edd5b44 1122 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
43f66a6c
JK
1123 return 0;
1124
1125 return sprintf(buf, "0x%08x\n", tmp);
1126}
0edd5b44
JG
1127
1128static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
43f66a6c 1129
ad3fee56
AM
1130static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1131 char *buf)
43f66a6c
JK
1132{
1133 u32 len = sizeof(u32), tmp = 0;
ad3fee56 1134 struct ipw_priv *p = d->driver_data;
43f66a6c 1135
0edd5b44 1136 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
43f66a6c
JK
1137 return 0;
1138
1139 return sprintf(buf, "0x%08x\n", tmp);
1140}
0edd5b44
JG
1141
1142static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
43f66a6c
JK
1143
1144/*
1145 * Add a device attribute to view/control the delay between eeprom
1146 * operations.
1147 */
ad3fee56 1148static ssize_t show_eeprom_delay(struct device *d,
0edd5b44 1149 struct device_attribute *attr, char *buf)
43f66a6c 1150{
0edd5b44 1151 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
43f66a6c
JK
1152 return sprintf(buf, "%i\n", n);
1153}
ad3fee56 1154static ssize_t store_eeprom_delay(struct device *d,
0edd5b44
JG
1155 struct device_attribute *attr,
1156 const char *buf, size_t count)
43f66a6c 1157{
ad3fee56 1158 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1159 sscanf(buf, "%i", &p->eeprom_delay);
1160 return strnlen(buf, count);
1161}
0edd5b44
JG
1162
1163static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1164 show_eeprom_delay, store_eeprom_delay);
43f66a6c 1165
ad3fee56 1166static ssize_t show_command_event_reg(struct device *d,
0edd5b44 1167 struct device_attribute *attr, char *buf)
43f66a6c
JK
1168{
1169 u32 reg = 0;
ad3fee56 1170 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1171
1172 reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
1173 return sprintf(buf, "0x%08x\n", reg);
1174}
ad3fee56 1175static ssize_t store_command_event_reg(struct device *d,
0edd5b44
JG
1176 struct device_attribute *attr,
1177 const char *buf, size_t count)
43f66a6c
JK
1178{
1179 u32 reg;
ad3fee56 1180 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1181
1182 sscanf(buf, "%x", &reg);
1183 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
1184 return strnlen(buf, count);
1185}
0edd5b44
JG
1186
1187static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1188 show_command_event_reg, store_command_event_reg);
43f66a6c 1189
ad3fee56 1190static ssize_t show_mem_gpio_reg(struct device *d,
0edd5b44 1191 struct device_attribute *attr, char *buf)
43f66a6c
JK
1192{
1193 u32 reg = 0;
ad3fee56 1194 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1195
1196 reg = ipw_read_reg32(p, 0x301100);
1197 return sprintf(buf, "0x%08x\n", reg);
1198}
ad3fee56 1199static ssize_t store_mem_gpio_reg(struct device *d,
0edd5b44
JG
1200 struct device_attribute *attr,
1201 const char *buf, size_t count)
43f66a6c
JK
1202{
1203 u32 reg;
ad3fee56 1204 struct ipw_priv *p = d->driver_data;
43f66a6c
JK
1205
1206 sscanf(buf, "%x", &reg);
1207 ipw_write_reg32(p, 0x301100, reg);
1208 return strnlen(buf, count);
1209}
0edd5b44
JG
1210
1211static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1212 show_mem_gpio_reg, store_mem_gpio_reg);
43f66a6c 1213
ad3fee56 1214static ssize_t show_indirect_dword(struct device *d,
0edd5b44 1215 struct device_attribute *attr, char *buf)
43f66a6c
JK
1216{
1217 u32 reg = 0;
ad3fee56 1218 struct ipw_priv *priv = d->driver_data;
bf79451e 1219 if (priv->status & STATUS_INDIRECT_DWORD)
43f66a6c 1220 reg = ipw_read_reg32(priv, priv->indirect_dword);
bf79451e 1221 else
43f66a6c 1222 reg = 0;
bf79451e 1223
43f66a6c
JK
1224 return sprintf(buf, "0x%08x\n", reg);
1225}
ad3fee56 1226static ssize_t store_indirect_dword(struct device *d,
0edd5b44
JG
1227 struct device_attribute *attr,
1228 const char *buf, size_t count)
43f66a6c 1229{
ad3fee56 1230 struct ipw_priv *priv = d->driver_data;
43f66a6c
JK
1231
1232 sscanf(buf, "%x", &priv->indirect_dword);
1233 priv->status |= STATUS_INDIRECT_DWORD;
1234 return strnlen(buf, count);
1235}
0edd5b44
JG
1236
1237static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1238 show_indirect_dword, store_indirect_dword);
43f66a6c 1239
ad3fee56 1240static ssize_t show_indirect_byte(struct device *d,
0edd5b44 1241 struct device_attribute *attr, char *buf)
43f66a6c
JK
1242{
1243 u8 reg = 0;
ad3fee56 1244 struct ipw_priv *priv = d->driver_data;
bf79451e 1245 if (priv->status & STATUS_INDIRECT_BYTE)
43f66a6c 1246 reg = ipw_read_reg8(priv, priv->indirect_byte);
bf79451e 1247 else
43f66a6c
JK
1248 reg = 0;
1249
1250 return sprintf(buf, "0x%02x\n", reg);
1251}
ad3fee56 1252static ssize_t store_indirect_byte(struct device *d,
0edd5b44
JG
1253 struct device_attribute *attr,
1254 const char *buf, size_t count)
43f66a6c 1255{
ad3fee56 1256 struct ipw_priv *priv = d->driver_data;
43f66a6c
JK
1257
1258 sscanf(buf, "%x", &priv->indirect_byte);
1259 priv->status |= STATUS_INDIRECT_BYTE;
1260 return strnlen(buf, count);
1261}
0edd5b44
JG
1262
1263static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
43f66a6c
JK
1264 show_indirect_byte, store_indirect_byte);
1265
ad3fee56 1266static ssize_t show_direct_dword(struct device *d,
0edd5b44 1267 struct device_attribute *attr, char *buf)
43f66a6c
JK
1268{
1269 u32 reg = 0;
ad3fee56 1270 struct ipw_priv *priv = d->driver_data;
43f66a6c 1271
bf79451e 1272 if (priv->status & STATUS_DIRECT_DWORD)
43f66a6c 1273 reg = ipw_read32(priv, priv->direct_dword);
bf79451e 1274 else
43f66a6c
JK
1275 reg = 0;
1276
1277 return sprintf(buf, "0x%08x\n", reg);
1278}
ad3fee56 1279static ssize_t store_direct_dword(struct device *d,
0edd5b44
JG
1280 struct device_attribute *attr,
1281 const char *buf, size_t count)
43f66a6c 1282{
ad3fee56 1283 struct ipw_priv *priv = d->driver_data;
43f66a6c
JK
1284
1285 sscanf(buf, "%x", &priv->direct_dword);
1286 priv->status |= STATUS_DIRECT_DWORD;
1287 return strnlen(buf, count);
1288}
43f66a6c 1289
0edd5b44
JG
1290static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1291 show_direct_dword, store_direct_dword);
43f66a6c
JK
1292
1293static inline int rf_kill_active(struct ipw_priv *priv)
1294{
1295 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1296 priv->status |= STATUS_RF_KILL_HW;
1297 else
1298 priv->status &= ~STATUS_RF_KILL_HW;
1299
1300 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1301}
1302
ad3fee56 1303static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
0edd5b44 1304 char *buf)
43f66a6c
JK
1305{
1306 /* 0 - RF kill not enabled
bf79451e 1307 1 - SW based RF kill active (sysfs)
43f66a6c
JK
1308 2 - HW based RF kill active
 1309 3 - Both HW and SW based RF kill active */
ad3fee56 1310 struct ipw_priv *priv = d->driver_data;
43f66a6c 1311 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
0edd5b44 1312 (rf_kill_active(priv) ? 0x2 : 0x0);
43f66a6c
JK
1313 return sprintf(buf, "%i\n", val);
1314}
1315
1316static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1317{
bf79451e 1318 if ((disable_radio ? 1 : 0) ==
ea2b26e0 1319 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
0edd5b44 1320 return 0;
43f66a6c
JK
1321
1322 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1323 disable_radio ? "OFF" : "ON");
1324
1325 if (disable_radio) {
1326 priv->status |= STATUS_RF_KILL_SW;
1327
a613bffd 1328 if (priv->workqueue)
43f66a6c 1329 cancel_delayed_work(&priv->request_scan);
43f66a6c
JK
1330 wake_up_interruptible(&priv->wait_command_queue);
1331 queue_work(priv->workqueue, &priv->down);
1332 } else {
1333 priv->status &= ~STATUS_RF_KILL_SW;
1334 if (rf_kill_active(priv)) {
1335 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1336 "disabled by HW switch\n");
1337 /* Make sure the RF_KILL check timer is running */
1338 cancel_delayed_work(&priv->rf_kill);
bf79451e 1339 queue_delayed_work(priv->workqueue, &priv->rf_kill,
43f66a6c 1340 2 * HZ);
bf79451e 1341 } else
43f66a6c
JK
1342 queue_work(priv->workqueue, &priv->up);
1343 }
1344
1345 return 1;
1346}
1347
0edd5b44
JG
1348static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1349 const char *buf, size_t count)
43f66a6c 1350{
ad3fee56 1351 struct ipw_priv *priv = d->driver_data;
bf79451e 1352
43f66a6c
JK
1353 ipw_radio_kill_sw(priv, buf[0] == '1');
1354
1355 return count;
1356}
0edd5b44
JG
1357
1358static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
43f66a6c 1359
ea2b26e0
JK
1360static void notify_wx_assoc_event(struct ipw_priv *priv)
1361{
1362 union iwreq_data wrqu;
1363 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1364 if (priv->status & STATUS_ASSOCIATED)
1365 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1366 else
1367 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1368 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1369}
1370
43f66a6c
JK
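/* Interrupt bottom half: service every pending INTA cause (including any
 * cached by the ISR), then re-enable interrupts. */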
1371static void ipw_irq_tasklet(struct ipw_priv *priv)
1372{
1373 u32 inta, inta_mask, handled = 0;
1374 unsigned long flags;
1375 int rc = 0;
1376
1377 spin_lock_irqsave(&priv->lock, flags);
1378
1379 inta = ipw_read32(priv, CX2_INTA_RW);
1380 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
1381 inta &= (CX2_INTA_MASK_ALL & inta_mask);
1382
1383 /* Add any cached INTA values that need to be handled */
1384 inta |= priv->isr_inta;
1385
 1386 /* handle all the causes of the interrupt */
1387 if (inta & CX2_INTA_BIT_RX_TRANSFER) {
1388 ipw_rx(priv);
1389 handled |= CX2_INTA_BIT_RX_TRANSFER;
1390 }
1391
1392 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
1393 IPW_DEBUG_HC("Command completed.\n");
0edd5b44 1394 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
43f66a6c
JK
1395 priv->status &= ~STATUS_HCMD_ACTIVE;
1396 wake_up_interruptible(&priv->wait_command_queue);
1397 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
1398 }
1399
1400 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
1401 IPW_DEBUG_TX("TX_QUEUE_1\n");
0edd5b44 1402 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
43f66a6c
JK
1403 handled |= CX2_INTA_BIT_TX_QUEUE_1;
1404 }
1405
1406 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
1407 IPW_DEBUG_TX("TX_QUEUE_2\n");
0edd5b44 1408 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
43f66a6c
JK
1409 handled |= CX2_INTA_BIT_TX_QUEUE_2;
1410 }
1411
1412 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
1413 IPW_DEBUG_TX("TX_QUEUE_3\n");
0edd5b44 1414 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
43f66a6c
JK
1415 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1416 }
1417
1418 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1419 IPW_DEBUG_TX("TX_QUEUE_4\n");
0edd5b44 1420 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
43f66a6c
JK
1421 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1422 }
1423
1424 if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
1425 IPW_WARNING("STATUS_CHANGE\n");
1426 handled |= CX2_INTA_BIT_STATUS_CHANGE;
1427 }
1428
1429 if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1430 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1431 handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
1432 }
1433
1434 if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1435 IPW_WARNING("HOST_CMD_DONE\n");
1436 handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1437 }
1438
1439 if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
1440 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1441 handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
1442 }
1443
1444 if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1445 IPW_WARNING("PHY_OFF_DONE\n");
1446 handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1447 }
1448
1449 if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
1450 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1451 priv->status |= STATUS_RF_KILL_HW;
1452 wake_up_interruptible(&priv->wait_command_queue);
ea2b26e0 1453 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
43f66a6c 1454 cancel_delayed_work(&priv->request_scan);
a613bffd 1455 schedule_work(&priv->link_down);
43f66a6c
JK
1456 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1457 handled |= CX2_INTA_BIT_RF_KILL_DONE;
1458 }
bf79451e 1459
43f66a6c
JK
1460 if (inta & CX2_INTA_BIT_FATAL_ERROR) {
1461 IPW_ERROR("Firmware error detected. Restarting.\n");
1462#ifdef CONFIG_IPW_DEBUG
1463 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1464 ipw_dump_nic_error_log(priv);
1465 ipw_dump_nic_event_log(priv);
1466 }
1467#endif
1468 queue_work(priv->workqueue, &priv->adapter_restart);
1469 handled |= CX2_INTA_BIT_FATAL_ERROR;
1470 }
1471
1472 if (inta & CX2_INTA_BIT_PARITY_ERROR) {
1473 IPW_ERROR("Parity error\n");
1474 handled |= CX2_INTA_BIT_PARITY_ERROR;
1475 }
1476
1477 if (handled != inta) {
0edd5b44 1478 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
43f66a6c
JK
1479 }
1480
1481 /* enable all interrupts */
1482 ipw_enable_interrupts(priv);
1483
1484 spin_unlock_irqrestore(&priv->lock, flags);
1485}
bf79451e 1486
43f66a6c
JK
1487#ifdef CONFIG_IPW_DEBUG
1488#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1489static char *get_cmd_string(u8 cmd)
1490{
1491 switch (cmd) {
1492 IPW_CMD(HOST_COMPLETE);
bf79451e
JG
1493 IPW_CMD(POWER_DOWN);
1494 IPW_CMD(SYSTEM_CONFIG);
1495 IPW_CMD(MULTICAST_ADDRESS);
1496 IPW_CMD(SSID);
1497 IPW_CMD(ADAPTER_ADDRESS);
1498 IPW_CMD(PORT_TYPE);
1499 IPW_CMD(RTS_THRESHOLD);
1500 IPW_CMD(FRAG_THRESHOLD);
1501 IPW_CMD(POWER_MODE);
1502 IPW_CMD(WEP_KEY);
1503 IPW_CMD(TGI_TX_KEY);
1504 IPW_CMD(SCAN_REQUEST);
1505 IPW_CMD(SCAN_REQUEST_EXT);
1506 IPW_CMD(ASSOCIATE);
1507 IPW_CMD(SUPPORTED_RATES);
1508 IPW_CMD(SCAN_ABORT);
1509 IPW_CMD(TX_FLUSH);
1510 IPW_CMD(QOS_PARAMETERS);
1511 IPW_CMD(DINO_CONFIG);
1512 IPW_CMD(RSN_CAPABILITIES);
1513 IPW_CMD(RX_KEY);
1514 IPW_CMD(CARD_DISABLE);
1515 IPW_CMD(SEED_NUMBER);
1516 IPW_CMD(TX_POWER);
1517 IPW_CMD(COUNTRY_INFO);
1518 IPW_CMD(AIRONET_INFO);
1519 IPW_CMD(AP_TX_POWER);
1520 IPW_CMD(CCKM_INFO);
1521 IPW_CMD(CCX_VER_INFO);
1522 IPW_CMD(SET_CALIBRATION);
1523 IPW_CMD(SENSITIVITY_CALIB);
1524 IPW_CMD(RETRY_LIMIT);
1525 IPW_CMD(IPW_PRE_POWER_DOWN);
1526 IPW_CMD(VAP_BEACON_TEMPLATE);
1527 IPW_CMD(VAP_DTIM_PERIOD);
1528 IPW_CMD(EXT_SUPPORTED_RATES);
1529 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1530 IPW_CMD(VAP_QUIET_INTERVALS);
1531 IPW_CMD(VAP_CHANNEL_SWITCH);
1532 IPW_CMD(VAP_MANDATORY_CHANNELS);
1533 IPW_CMD(VAP_CELL_PWR_LIMIT);
1534 IPW_CMD(VAP_CF_PARAM_SET);
1535 IPW_CMD(VAP_SET_BEACONING_STATE);
1536 IPW_CMD(MEASUREMENT);
1537 IPW_CMD(POWER_CAPABILITY);
1538 IPW_CMD(SUPPORTED_CHANNELS);
1539 IPW_CMD(TPC_REPORT);
1540 IPW_CMD(WME_INFO);
1541 IPW_CMD(PRODUCTION_COMMAND);
1542 default:
43f66a6c
JK
1543 return "UNKNOWN";
1544 }
1545}
ea2b26e0 1546#endif
43f66a6c
JK
1547
1548#define HOST_COMPLETE_TIMEOUT HZ
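/* Send a host command to the firmware and wait up to HOST_COMPLETE_TIMEOUT
 * for the command-queue interrupt handler to clear STATUS_HCMD_ACTIVE. */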
1549static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1550{
1551 int rc = 0;
a613bffd 1552 unsigned long flags;
43f66a6c 1553
a613bffd 1554 spin_lock_irqsave(&priv->lock, flags);
43f66a6c
JK
1555 if (priv->status & STATUS_HCMD_ACTIVE) {
1556 IPW_ERROR("Already sending a command\n");
a613bffd 1557 spin_unlock_irqrestore(&priv->lock, flags);
43f66a6c
JK
1558 return -1;
1559 }
1560
1561 priv->status |= STATUS_HCMD_ACTIVE;
bf79451e
JG
1562
1563 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
43f66a6c 1564 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
0edd5b44 1565 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
43f66a6c
JK
1566
1567 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
a613bffd
JK
1568 if (rc) {
1569 priv->status &= ~STATUS_HCMD_ACTIVE;
1570 spin_unlock_irqrestore(&priv->lock, flags);
43f66a6c 1571 return rc;
a613bffd
JK
1572 }
1573 spin_unlock_irqrestore(&priv->lock, flags);
43f66a6c 1574
0edd5b44
JG
1575 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1576 !(priv->
1577 status & STATUS_HCMD_ACTIVE),
1578 HOST_COMPLETE_TIMEOUT);
43f66a6c 1579 if (rc == 0) {
a613bffd
JK
1580 spin_lock_irqsave(&priv->lock, flags);
1581 if (priv->status & STATUS_HCMD_ACTIVE) {
 1582 IPW_DEBUG_INFO("Command completion timed out after "
1583 "%dms.\n",
1584 1000 * (HOST_COMPLETE_TIMEOUT / HZ));
1585 priv->status &= ~STATUS_HCMD_ACTIVE;
1586 spin_unlock_irqrestore(&priv->lock, flags);
1587 return -EIO;
1588 }
1589 spin_unlock_irqrestore(&priv->lock, flags);
43f66a6c 1590 }
a613bffd 1591
43f66a6c
JK
1592 if (priv->status & STATUS_RF_KILL_MASK) {
1593 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
1594 return -EIO;
1595 }
1596
1597 return 0;
1598}
1599
1600static int ipw_send_host_complete(struct ipw_priv *priv)
1601{
1602 struct host_cmd cmd = {
1603 .cmd = IPW_CMD_HOST_COMPLETE,
1604 .len = 0
1605 };
1606
1607 if (!priv) {
1608 IPW_ERROR("Invalid args\n");
1609 return -1;
1610 }
1611
1612 if (ipw_send_cmd(priv, &cmd)) {
1613 IPW_ERROR("failed to send HOST_COMPLETE command\n");
1614 return -1;
1615 }
bf79451e 1616
43f66a6c
JK
1617 return 0;
1618}
1619
bf79451e 1620static int ipw_send_system_config(struct ipw_priv *priv,
43f66a6c
JK
1621 struct ipw_sys_config *config)
1622{
1623 struct host_cmd cmd = {
1624 .cmd = IPW_CMD_SYSTEM_CONFIG,
1625 .len = sizeof(*config)
1626 };
1627
1628 if (!priv || !config) {
1629 IPW_ERROR("Invalid args\n");
1630 return -1;
1631 }
1632
0edd5b44 1633 memcpy(&cmd.param, config, sizeof(*config));
43f66a6c
JK
1634 if (ipw_send_cmd(priv, &cmd)) {
1635 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1636 return -1;
1637 }
1638
1639 return 0;
1640}
1641
0edd5b44 1642static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
43f66a6c
JK
1643{
1644 struct host_cmd cmd = {
1645 .cmd = IPW_CMD_SSID,
1646 .len = min(len, IW_ESSID_MAX_SIZE)
1647 };
1648
1649 if (!priv || !ssid) {
1650 IPW_ERROR("Invalid args\n");
1651 return -1;
1652 }
1653
1654 memcpy(&cmd.param, ssid, cmd.len);
1655 if (ipw_send_cmd(priv, &cmd)) {
1656 IPW_ERROR("failed to send SSID command\n");
1657 return -1;
1658 }
bf79451e 1659
43f66a6c
JK
1660 return 0;
1661}
1662
0edd5b44 1663static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
43f66a6c
JK
1664{
1665 struct host_cmd cmd = {
1666 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1667 .len = ETH_ALEN
1668 };
1669
1670 if (!priv || !mac) {
1671 IPW_ERROR("Invalid args\n");
1672 return -1;
1673 }
1674
1675 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1676 priv->net_dev->name, MAC_ARG(mac));
1677
1678 memcpy(&cmd.param, mac, ETH_ALEN);
1679
1680 if (ipw_send_cmd(priv, &cmd)) {
1681 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
1682 return -1;
1683 }
bf79451e 1684
43f66a6c
JK
1685 return 0;
1686}
1687
a613bffd
JK
1688/*
 1689 * NOTE: This must be executed from our workqueue as it results in udelay
 1690 * being called, which may corrupt the keyboard if executed on the
 1691 * default workqueue
1692 */
43f66a6c
JK
1693static void ipw_adapter_restart(void *adapter)
1694{
1695 struct ipw_priv *priv = adapter;
1696
1697 if (priv->status & STATUS_RF_KILL_MASK)
1698 return;
1699
1700 ipw_down(priv);
1701 if (ipw_up(priv)) {
1702 IPW_ERROR("Failed to up device\n");
1703 return;
1704 }
1705}
1706
43f66a6c
JK
1707#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1708
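/* Scan watchdog: if a scan is still pending or aborting when this fires,
 * restart the adapter. */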
1709static void ipw_scan_check(void *data)
1710{
1711 struct ipw_priv *priv = data;
1712 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1713 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
bf79451e 1714 "adapter (%dms).\n",
43f66a6c 1715 IPW_SCAN_CHECK_WATCHDOG * 1000 / HZ);
a613bffd 1716 queue_work(priv->workqueue, &priv->adapter_restart);
43f66a6c
JK
1717 }
1718}
1719
1720static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1721 struct ipw_scan_request_ext *request)
1722{
1723 struct host_cmd cmd = {
1724 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1725 .len = sizeof(*request)
1726 };
1727
1728 if (!priv || !request) {
1729 IPW_ERROR("Invalid args\n");
1730 return -1;
1731 }
1732
0edd5b44 1733 memcpy(&cmd.param, request, sizeof(*request));
43f66a6c
JK
1734 if (ipw_send_cmd(priv, &cmd)) {
1735 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1736 return -1;
1737 }
bf79451e
JG
1738
1739 queue_delayed_work(priv->workqueue, &priv->scan_check,
43f66a6c
JK
1740 IPW_SCAN_CHECK_WATCHDOG);
1741 return 0;
1742}
1743
1744static int ipw_send_scan_abort(struct ipw_priv *priv)
1745{
1746 struct host_cmd cmd = {
1747 .cmd = IPW_CMD_SCAN_ABORT,
1748 .len = 0
1749 };
1750
1751 if (!priv) {
1752 IPW_ERROR("Invalid args\n");
1753 return -1;
1754 }
1755
1756 if (ipw_send_cmd(priv, &cmd)) {
1757 IPW_ERROR("failed to send SCAN_ABORT command\n");
1758 return -1;
1759 }
bf79451e 1760
43f66a6c
JK
1761 return 0;
1762}
1763
1764static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1765{
1766 struct host_cmd cmd = {
1767 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1768 .len = sizeof(struct ipw_sensitivity_calib)
1769 };
1770 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
0edd5b44 1771 &cmd.param;
43f66a6c
JK
1772 calib->beacon_rssi_raw = sens;
1773 if (ipw_send_cmd(priv, &cmd)) {
1774 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
1775 return -1;
1776 }
1777
1778 return 0;
1779}
1780
1781static int ipw_send_associate(struct ipw_priv *priv,
1782 struct ipw_associate *associate)
1783{
1784 struct host_cmd cmd = {
1785 .cmd = IPW_CMD_ASSOCIATE,
1786 .len = sizeof(*associate)
1787 };
1788
1789 struct ipw_associate tmp_associate;
1790 memcpy(&tmp_associate, associate, sizeof(*associate));
1791 tmp_associate.policy_support =
1792 cpu_to_le16(tmp_associate.policy_support);
1793 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
1794 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
1795 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
1796 tmp_associate.listen_interval =
1797 cpu_to_le16(tmp_associate.listen_interval);
1798 tmp_associate.beacon_interval =
1799 cpu_to_le16(tmp_associate.beacon_interval);
1800 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
1801
1802 if (!priv || !associate) {
1803 IPW_ERROR("Invalid args\n");
1804 return -1;
1805 }
1806
a613bffd 1807 memcpy(&cmd.param, &tmp_associate, sizeof(*associate));
1808 if (ipw_send_cmd(priv, &cmd)) {
1809 IPW_ERROR("failed to send ASSOCIATE command\n");
1810 return -1;
1811 }
bf79451e 1812
1813 return 0;
1814}
1815
1816static int ipw_send_supported_rates(struct ipw_priv *priv,
1817 struct ipw_supported_rates *rates)
1818{
1819 struct host_cmd cmd = {
1820 .cmd = IPW_CMD_SUPPORTED_RATES,
1821 .len = sizeof(*rates)
1822 };
1823
1824 if (!priv || !rates) {
1825 IPW_ERROR("Invalid args\n");
1826 return -1;
1827 }
1828
0edd5b44 1829 memcpy(&cmd.param, rates, sizeof(*rates));
1830 if (ipw_send_cmd(priv, &cmd)) {
1831 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1832 return -1;
1833 }
bf79451e 1834
1835 return 0;
1836}
1837
1838static int ipw_set_random_seed(struct ipw_priv *priv)
1839{
1840 struct host_cmd cmd = {
1841 .cmd = IPW_CMD_SEED_NUMBER,
1842 .len = sizeof(u32)
1843 };
1844
1845 if (!priv) {
1846 IPW_ERROR("Invalid args\n");
1847 return -1;
1848 }
1849
1850 get_random_bytes(&cmd.param, sizeof(u32));
1851
1852 if (ipw_send_cmd(priv, &cmd)) {
1853 IPW_ERROR("failed to send SEED_NUMBER command\n");
1854 return -1;
1855 }
bf79451e 1856
1857 return 0;
1858}
1859
1860#if 0
1861static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1862{
1863 struct host_cmd cmd = {
1864 .cmd = IPW_CMD_CARD_DISABLE,
1865 .len = sizeof(u32)
1866 };
1867
1868 if (!priv) {
1869 IPW_ERROR("Invalid args\n");
1870 return -1;
1871 }
1872
0edd5b44 1873 *((u32 *) & cmd.param) = phy_off;
1874
1875 if (ipw_send_cmd(priv, &cmd)) {
1876 IPW_ERROR("failed to send CARD_DISABLE command\n");
1877 return -1;
1878 }
bf79451e 1879
1880 return 0;
1881}
1882#endif
1883
0edd5b44 1884static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
1885{
1886 struct host_cmd cmd = {
1887 .cmd = IPW_CMD_TX_POWER,
1888 .len = sizeof(*power)
1889 };
1890
1891 if (!priv || !power) {
1892 IPW_ERROR("Invalid args\n");
1893 return -1;
1894 }
1895
0edd5b44 1896 memcpy(&cmd.param, power, sizeof(*power));
1897 if (ipw_send_cmd(priv, &cmd)) {
1898 IPW_ERROR("failed to send TX_POWER command\n");
1899 return -1;
1900 }
bf79451e 1901
1902 return 0;
1903}
1904
1905static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1906{
1907 struct ipw_rts_threshold rts_threshold = {
1908 .rts_threshold = rts,
1909 };
1910 struct host_cmd cmd = {
1911 .cmd = IPW_CMD_RTS_THRESHOLD,
1912 .len = sizeof(rts_threshold)
1913 };
1914
1915 if (!priv) {
1916 IPW_ERROR("Invalid args\n");
1917 return -1;
1918 }
1919
1920 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1921 if (ipw_send_cmd(priv, &cmd)) {
1922 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
1923 return -1;
1924 }
1925
1926 return 0;
1927}
1928
1929static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1930{
1931 struct ipw_frag_threshold frag_threshold = {
1932 .frag_threshold = frag,
1933 };
1934 struct host_cmd cmd = {
1935 .cmd = IPW_CMD_FRAG_THRESHOLD,
1936 .len = sizeof(frag_threshold)
1937 };
1938
1939 if (!priv) {
1940 IPW_ERROR("Invalid args\n");
1941 return -1;
1942 }
1943
1944 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1945 if (ipw_send_cmd(priv, &cmd)) {
1946 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
1947 return -1;
1948 }
1949
1950 return 0;
1951}
1952
1953static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1954{
1955 struct host_cmd cmd = {
1956 .cmd = IPW_CMD_POWER_MODE,
1957 .len = sizeof(u32)
1958 };
0edd5b44 1959 u32 *param = (u32 *) (&cmd.param);
1960
1961 if (!priv) {
1962 IPW_ERROR("Invalid args\n");
1963 return -1;
1964 }
bf79451e 1965
1966 /* If on battery, set to 3, if AC set to CAM, else user
1967 * level */
1968 switch (mode) {
1969 case IPW_POWER_BATTERY:
1970 *param = IPW_POWER_INDEX_3;
1971 break;
1972 case IPW_POWER_AC:
1973 *param = IPW_POWER_MODE_CAM;
1974 break;
1975 default:
1976 *param = mode;
1977 break;
1978 }
1979
1980 if (ipw_send_cmd(priv, &cmd)) {
1981 IPW_ERROR("failed to send POWER_MODE command\n");
1982 return -1;
1983 }
1984
1985 return 0;
1986}
1987
1988/*
1989 * The IPW device contains a Microwire compatible EEPROM that stores
1990 * various data like the MAC address. Usually the firmware has exclusive
1991 * access to the eeprom, but during device initialization (before the
1992 * device driver has sent the HostComplete command to the firmware) the
1993 * device driver has read access to the EEPROM by way of indirect addressing
1994 * through a couple of memory mapped registers.
1995 *
1996 * The following is a simplified implementation for pulling data out of
1997 * the eeprom, along with some helper functions to find information in
1998 * the per device private data's copy of the eeprom.
1999 *
2000 * NOTE: To better understand how these functions work (i.e what is a chip
2001 * select and why do we have to keep driving the eeprom clock?), read
2002 * just about any data sheet for a Microwire compatible EEPROM.
2003 */
2004
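/*
 * Illustrative sketch only (not part of the driver, names hypothetical):
 * the helpers below can be composed to dump the first few EEPROM words
 * during early bring-up, e.g. for debugging, since each eeprom_read_u16()
 * clocks one 16 bit word out of the part before the firmware takes over.
 */
#if 0
static void eeprom_dump_head(struct ipw_priv *priv)
{
	int i;

	for (i = 0; i < 8; i++)
		IPW_DEBUG_INFO("eeprom word 0x%02x = 0x%04x\n", i,
			       eeprom_read_u16(priv, (u8) i));
}
#endif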
2005/* write a 32 bit value into the indirect accessor register */
2006static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2007{
2008 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
bf79451e 2009
2010 /* the eeprom requires some time to complete the operation */
2011 udelay(p->eeprom_delay);
2012
2013 return;
2014}
2015
2016/* perform a chip select operation */
0edd5b44 2017static inline void eeprom_cs(struct ipw_priv *priv)
43f66a6c 2018{
2019 eeprom_write_reg(priv, 0);
2020 eeprom_write_reg(priv, EEPROM_BIT_CS);
2021 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2022 eeprom_write_reg(priv, EEPROM_BIT_CS);
2023}
2024
2025 /* disable chip select */
0edd5b44 2026static inline void eeprom_disable_cs(struct ipw_priv *priv)
43f66a6c 2027{
2028 eeprom_write_reg(priv, EEPROM_BIT_CS);
2029 eeprom_write_reg(priv, 0);
2030 eeprom_write_reg(priv, EEPROM_BIT_SK);
2031}
2032
2033/* push a single bit down to the eeprom */
0edd5b44 2034static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
43f66a6c 2035{
2036 int d = (bit ? EEPROM_BIT_DI : 0);
2037 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2038 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2039}
2040
2041/* push an opcode followed by an address down to the eeprom */
0edd5b44 2042static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2043{
2044 int i;
2045
2046 eeprom_cs(priv);
2047 eeprom_write_bit(priv, 1);
2048 eeprom_write_bit(priv, op & 2);
2049 eeprom_write_bit(priv, op & 1);
2050 for (i = 7; i >= 0; i--) {
2051 eeprom_write_bit(priv, addr & (1 << i));
2052 }
2053}
2054
2055/* pull 16 bits off the eeprom, one bit at a time */
0edd5b44 2056static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2057{
2058 int i;
0edd5b44 2059 u16 r = 0;
bf79451e 2060
43f66a6c 2061 /* Send READ Opcode */
0edd5b44 2062 eeprom_op(priv, EEPROM_CMD_READ, addr);
2063
2064 /* Send dummy bit */
0edd5b44 2065 eeprom_write_reg(priv, EEPROM_BIT_CS);
2066
2067 /* Read the 16 bit word off the eeprom one bit at a time */
0edd5b44 2068 for (i = 0; i < 16; i++) {
43f66a6c 2069 u32 data = 0;
2070 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2071 eeprom_write_reg(priv, EEPROM_BIT_CS);
2072 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2073 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
43f66a6c 2074 }
bf79451e 2075
43f66a6c 2076 /* Send another dummy bit */
0edd5b44 2077 eeprom_write_reg(priv, 0);
43f66a6c 2078 eeprom_disable_cs(priv);
bf79451e 2079
2080 return r;
2081}
2082
2083/* helper function for pulling the mac address out of the private */
2084/* data's copy of the eeprom data */
0edd5b44 2085static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
43f66a6c 2086{
0edd5b44 2087 u8 *ee = (u8 *) priv->eeprom;
2088 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
2089}
2090
2091/*
2092 * Either the device driver (i.e. the host) or the firmware can
2093 * load eeprom data into the designated region in SRAM. If neither
2094 * happens then the FW will shut down with a fatal error.
2095 *
2096 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2097 * region of shared SRAM needs to be non-zero.
2098 */
2099static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2100{
2101 int i;
0edd5b44 2102 u16 *eeprom = (u16 *) priv->eeprom;
bf79451e 2103
2104 IPW_DEBUG_TRACE(">>\n");
2105
2106 /* read entire contents of eeprom into private buffer */
0edd5b44 2107 for (i = 0; i < 128; i++)
a613bffd 2108 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
43f66a6c 2109
2110 /*
2111 If the data looks correct, then copy it to our private
2112 copy. Otherwise let the firmware know to perform the operation
2113 on its own
0edd5b44 2114 */
2115 if (priv->eeprom[EEPROM_VERSION] != 0) {
2116 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2117
2118 /* write the eeprom data to sram */
2119 for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
2120 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2121
2122 /* Do not load eeprom data on fatal error or suspend */
2123 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2124 } else {
2125 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2126
2127 /* Load eeprom data on fatal error or suspend */
2128 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2129 }
2130
2131 IPW_DEBUG_TRACE("<<\n");
2132}
2133
2134static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2135{
2136 count >>= 2;
2137 if (!count)
2138 return;
43f66a6c 2139 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
bf79451e 2140 while (count--)
2141 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
2142}
2143
2144static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2145{
2146 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
bf79451e 2147 CB_NUMBER_OF_ELEMENTS_SMALL *
2148 sizeof(struct command_block));
2149}
2150
2151static int ipw_fw_dma_enable(struct ipw_priv *priv)
0edd5b44 2152{ /* start dma engine but no transfers yet */
2153
2154 IPW_DEBUG_FW(">> : \n");
bf79451e 2155
2156 /* Start the dma */
2157 ipw_fw_dma_reset_command_blocks(priv);
bf79451e 2158
2159 /* Write CB base address */
2160 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
2161
2162 IPW_DEBUG_FW("<< : \n");
2163 return 0;
2164}
2165
2166static void ipw_fw_dma_abort(struct ipw_priv *priv)
2167{
2168 u32 control = 0;
2169
2170 IPW_DEBUG_FW(">> :\n");
2171
2172 /* set the Stop and Abort bit */
2173 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2174 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
2175 priv->sram_desc.last_cb_index = 0;
bf79451e 2176
2177 IPW_DEBUG_FW("<< \n");
2178}
2179
2180static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2181 struct command_block *cb)
43f66a6c 2182{
2183 u32 address =
2184 CX2_SHARED_SRAM_DMA_CONTROL +
2185 (sizeof(struct command_block) * index);
2186 IPW_DEBUG_FW(">> :\n");
2187
2188 ipw_write_indirect(priv, address, (u8 *) cb,
2189 (int)sizeof(struct command_block));
2190
2191 IPW_DEBUG_FW("<< :\n");
2192 return 0;
2193
2194}
2195
2196static int ipw_fw_dma_kick(struct ipw_priv *priv)
2197{
2198 u32 control = 0;
0edd5b44 2199 u32 index = 0;
2200
2201 IPW_DEBUG_FW(">> :\n");
bf79451e 2202
43f66a6c 2203 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2204 ipw_fw_dma_write_command_block(priv, index,
2205 &priv->sram_desc.cb_list[index]);
2206
2207 /* Enable the DMA in the CSR register */
2208 ipw_clear_bit(priv, CX2_RESET_REG,
2209 CX2_RESET_REG_MASTER_DISABLED |
2210 CX2_RESET_REG_STOP_MASTER);
bf79451e 2211
0edd5b44 2212 /* Set the Start bit. */
2213 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2214 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
2215
2216 IPW_DEBUG_FW("<< :\n");
2217 return 0;
2218}
2219
2220static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2221{
2222 u32 address;
2223 u32 register_value = 0;
2224 u32 cb_fields_address = 0;
2225
2226 IPW_DEBUG_FW(">> :\n");
2227 address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
2228 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2229
2230 /* Read the DMA Control register */
2231 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
0edd5b44 2232 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
43f66a6c 2233
0edd5b44 2234 /* Print the CB values */
2235 cb_fields_address = address;
2236 register_value = ipw_read_reg32(priv, cb_fields_address);
0edd5b44 2237 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2238
2239 cb_fields_address += sizeof(u32);
2240 register_value = ipw_read_reg32(priv, cb_fields_address);
0edd5b44 2241 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2242
2243 cb_fields_address += sizeof(u32);
2244 register_value = ipw_read_reg32(priv, cb_fields_address);
2245 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2246 register_value);
2247
2248 cb_fields_address += sizeof(u32);
2249 register_value = ipw_read_reg32(priv, cb_fields_address);
0edd5b44 2250 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2251
2252 IPW_DEBUG_FW(">> :\n");
2253}
2254
2255static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2256{
2257 u32 current_cb_address = 0;
2258 u32 current_cb_index = 0;
2259
2260 IPW_DEBUG_FW("<< :\n");
0edd5b44 2261 current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
bf79451e 2262
2263 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
2264 sizeof(struct command_block);
bf79451e 2265
43f66a6c 2266 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
0edd5b44 2267 current_cb_index, current_cb_address);
2268
2269 IPW_DEBUG_FW(">> :\n");
2270 return current_cb_index;
2271
2272}
2273
2274static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2275 u32 src_address,
2276 u32 dest_address,
2277 u32 length,
0edd5b44 2278 int interrupt_enabled, int is_last)
2279{
2280
bf79451e 2281 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2282 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2283 CB_DEST_SIZE_LONG;
43f66a6c 2284 struct command_block *cb;
0edd5b44 2285 u32 last_cb_element = 0;
2286
2287 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2288 src_address, dest_address, length);
2289
2290 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2291 return -1;
2292
2293 last_cb_element = priv->sram_desc.last_cb_index;
2294 cb = &priv->sram_desc.cb_list[last_cb_element];
2295 priv->sram_desc.last_cb_index++;
2296
2297 /* Calculate the new CB control word */
0edd5b44 2298 if (interrupt_enabled)
2299 control |= CB_INT_ENABLED;
2300
2301 if (is_last)
2302 control |= CB_LAST_VALID;
bf79451e 2303
2304 control |= length;
2305
2306 /* Calculate the CB Element's checksum value */
0edd5b44 2307 cb->status = control ^ src_address ^ dest_address;
2308
2309 /* Copy the Source and Destination addresses */
2310 cb->dest_addr = dest_address;
2311 cb->source_addr = src_address;
2312
2313 /* Copy the Control Word last */
2314 cb->control = control;
2315
2316 return 0;
2317}
2318
2319static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
0edd5b44 2320 u32 src_phys, u32 dest_address, u32 length)
2321{
2322 u32 bytes_left = length;
2323 u32 src_offset = 0;
2324 u32 dest_offset = 0;
2325 int status = 0;
2326 IPW_DEBUG_FW(">> \n");
2327 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2328 src_phys, dest_address, length);
2329 while (bytes_left > CB_MAX_LENGTH) {
2330 status = ipw_fw_dma_add_command_block(priv,
2331 src_phys + src_offset,
2332 dest_address +
2333 dest_offset,
2334 CB_MAX_LENGTH, 0, 0);
2335 if (status) {
2336 IPW_DEBUG_FW_INFO(": Failed\n");
2337 return -1;
bf79451e 2338 } else
2339 IPW_DEBUG_FW_INFO(": Added new cb\n");
2340
2341 src_offset += CB_MAX_LENGTH;
2342 dest_offset += CB_MAX_LENGTH;
2343 bytes_left -= CB_MAX_LENGTH;
2344 }
2345
2346 /* add the buffer tail */
2347 if (bytes_left > 0) {
2348 status =
2349 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2350 dest_address + dest_offset,
2351 bytes_left, 0, 0);
2352 if (status) {
2353 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2354 return -1;
bf79451e 2355 } else
2356 IPW_DEBUG_FW_INFO
2357 (": Adding new cb - the buffer tail\n");
43f66a6c 2358 }
bf79451e 2359
2360 IPW_DEBUG_FW("<< \n");
2361 return 0;
2362}
2363
2364static int ipw_fw_dma_wait(struct ipw_priv *priv)
2365{
2366 u32 current_index = 0;
2367 u32 watchdog = 0;
2368
2369 IPW_DEBUG_FW(">> : \n");
2370
2371 current_index = ipw_fw_dma_command_block_index(priv);
bf79451e 2372 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
0edd5b44 2373 (int)priv->sram_desc.last_cb_index);
2374
2375 while (current_index < priv->sram_desc.last_cb_index) {
2376 udelay(50);
2377 current_index = ipw_fw_dma_command_block_index(priv);
2378
2379 watchdog++;
2380
2381 if (watchdog > 400) {
2382 IPW_DEBUG_FW_INFO("Timeout\n");
2383 ipw_fw_dma_dump_command_block(priv);
2384 ipw_fw_dma_abort(priv);
2385 return -1;
2386 }
2387 }
2388
2389 ipw_fw_dma_abort(priv);
2390
2391 /*Disable the DMA in the CSR register */
2392 ipw_set_bit(priv, CX2_RESET_REG,
2393 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
2394
2395 IPW_DEBUG_FW("<< dmaWaitSync \n");
2396 return 0;
2397}
2398
bf79451e 2399static void ipw_remove_current_network(struct ipw_priv *priv)
43f66a6c
JK
2400{
2401 struct list_head *element, *safe;
bf79451e 2402 struct ieee80211_network *network = NULL;
a613bffd
JK
2403 unsigned long flags;
2404
2405 spin_lock_irqsave(&priv->ieee->lock, flags);
43f66a6c
JK
2406 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2407 network = list_entry(element, struct ieee80211_network, list);
2408 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2409 list_del(element);
bf79451e 2410 list_add_tail(&network->list,
43f66a6c
JK
2411 &priv->ieee->network_free_list);
2412 }
2413 }
a613bffd 2414 spin_unlock_irqrestore(&priv->ieee->lock, flags);
43f66a6c
JK
2415}
2416
2417/**
bf79451e 2418 * Check that card is still alive.
43f66a6c
JK
2419 * Reads debug register from domain0.
2420 * If card is present, pre-defined value should
2421 * be found there.
bf79451e 2422 *
43f66a6c
JK
2423 * @param priv
2424 * @return 1 if card is present, 0 otherwise
2425 */
2426static inline int ipw_alive(struct ipw_priv *priv)
2427{
2428 return ipw_read32(priv, 0x90) == 0xd55555d5;
2429}
2430
2431static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2432 int timeout)
2433{
2434 int i = 0;
2435
2436 do {
bf79451e 2437 if ((ipw_read32(priv, addr) & mask) == mask)
43f66a6c
JK
2438 return i;
2439 mdelay(10);
2440 i += 10;
2441 } while (i < timeout);
bf79451e 2442
43f66a6c
JK
2443 return -ETIME;
2444}
2445
bf79451e 2446/* These functions load the firmware and micro code for the operation of
43f66a6c
JK
2447 * the ipw hardware. It assumes the buffer has all the bits for the
2448 * image and the caller is handling the memory allocation and clean up.
2449 */
2450
0edd5b44 2451static int ipw_stop_master(struct ipw_priv *priv)
43f66a6c
JK
2452{
2453 int rc;
bf79451e 2454
43f66a6c
JK
2455 IPW_DEBUG_TRACE(">> \n");
2456 /* stop master. typical delay - 0 */
2457 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2458
2459 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2460 CX2_RESET_REG_MASTER_DISABLED, 100);
2461 if (rc < 0) {
2462 IPW_ERROR("stop master failed in 100ms\n");
2463 return -1;
2464 }
2465
2466 IPW_DEBUG_INFO("stop master %dms\n", rc);
2467
2468 return rc;
2469}
2470
2471static void ipw_arc_release(struct ipw_priv *priv)
2472{
2473 IPW_DEBUG_TRACE(">> \n");
2474 mdelay(5);
2475
2476 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2477
2478 /* no one knows timing, for safety add some delay */
2479 mdelay(5);
2480}
2481
2482struct fw_header {
2483 u32 version;
2484 u32 mode;
2485};
2486
2487struct fw_chunk {
2488 u32 address;
2489 u32 length;
2490};
2491
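/*
 * As consumed by ipw_load_firmware() below, an image is laid out as a
 * struct fw_header followed by a sequence of records, each one a struct
 * fw_chunk immediately followed by chunk->length bytes of payload that
 * get DMAed to chunk->address.
 */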
2492#define IPW_FW_MAJOR_VERSION 2
2493#define IPW_FW_MINOR_VERSION 2
2494
2495#define IPW_FW_MINOR(x) (((x) & 0xff00) >> 8)
2496#define IPW_FW_MAJOR(x) (x & 0xff)
2497
2498#define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2499 IPW_FW_MAJOR_VERSION)
2500
2501#define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2502"." __stringify(IPW_FW_MINOR_VERSION) "-"
2503
2504#if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2505#define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2506#else
2507#define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2508#endif
2509
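/*
 * For illustration: with the 2.2 version numbers above, IPW_FW_VERSION
 * works out to 0x0202 and IPW_FW_NAME("boot") expands to "ipw-2.2-boot.fw";
 * the fallback branch (used for 2.0 or older firmware numbering) would
 * yield "ipw2200_boot.fw" instead.
 */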
0edd5b44 2510static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2511{
2512 int rc = 0, i, addr;
2513 u8 cr = 0;
2514 u16 *image;
2515
0edd5b44 2516 image = (u16 *) data;
bf79451e 2517
43f66a6c
JK
2518 IPW_DEBUG_TRACE(">> \n");
2519
2520 rc = ipw_stop_master(priv);
2521
2522 if (rc < 0)
2523 return rc;
bf79451e 2524
0edd5b44 2525// spin_lock_irqsave(&priv->lock, flags);
bf79451e 2526
43f66a6c
JK
2527 for (addr = CX2_SHARED_LOWER_BOUND;
2528 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2529 ipw_write32(priv, addr, 0);
2530 }
2531
2532 /* no ucode (yet) */
2533 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2534 /* destroy DMA queues */
2535 /* reset sequence */
2536
0edd5b44 2537 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
43f66a6c
JK
2538 ipw_arc_release(priv);
2539 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2540 mdelay(1);
2541
2542 /* reset PHY */
2543 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2544 mdelay(1);
bf79451e 2545
43f66a6c
JK
2546 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2547 mdelay(1);
bf79451e 2548
43f66a6c
JK
2549 /* enable ucode store */
2550 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2551 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2552 mdelay(1);
2553
2554 /* write ucode */
2555 /**
2556 * @bug
2557 * Do NOT set indirect address register once and then
2558 * store data to indirect data register in the loop.
2559 * It seems very reasonable, but in this case DINO do not
2560 * accept ucode. It is essential to set address each time.
2561 */
2562 /* load new ipw uCode */
2563 for (i = 0; i < len / 2; i++)
a613bffd
JK
2564 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE,
2565 cpu_to_le16(image[i]));
43f66a6c 2566
43f66a6c
JK
2567 /* enable DINO */
2568 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
0edd5b44 2569 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
43f66a6c 2570
0edd5b44 2571 /* this is where the igx / win driver deviates from the VAP driver. */
2572
2573 /* wait for alive response */
2574 for (i = 0; i < 100; i++) {
2575 /* poll for incoming data */
2576 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2577 if (cr & DINO_RXFIFO_DATA)
2578 break;
2579 mdelay(1);
2580 }
2581
2582 if (cr & DINO_RXFIFO_DATA) {
2583 /* alive_command_response size is NOT a multiple of 4 */
2584 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2585
2586 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
43f66a6c 2587 response_buffer[i] =
a613bffd
JK
2588 le32_to_cpu(ipw_read_reg32(priv,
2589 CX2_BASEBAND_RX_FIFO_READ));
43f66a6c
JK
2590 memcpy(&priv->dino_alive, response_buffer,
2591 sizeof(priv->dino_alive));
2592 if (priv->dino_alive.alive_command == 1
2593 && priv->dino_alive.ucode_valid == 1) {
2594 rc = 0;
0edd5b44
JG
2595 IPW_DEBUG_INFO
2596 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2597 "of %02d/%02d/%02d %02d:%02d\n",
2598 priv->dino_alive.software_revision,
2599 priv->dino_alive.software_revision,
2600 priv->dino_alive.device_identifier,
2601 priv->dino_alive.device_identifier,
2602 priv->dino_alive.time_stamp[0],
2603 priv->dino_alive.time_stamp[1],
2604 priv->dino_alive.time_stamp[2],
2605 priv->dino_alive.time_stamp[3],
2606 priv->dino_alive.time_stamp[4]);
2607 } else {
2608 IPW_DEBUG_INFO("Microcode is not alive\n");
2609 rc = -EINVAL;
2610 }
2611 } else {
2612 IPW_DEBUG_INFO("No alive response from DINO\n");
2613 rc = -ETIME;
2614 }
2615
2616 /* disable DINO, otherwise for some reason the
2617 firmware has problems getting an alive response */
2618 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2619
0edd5b44 2620// spin_unlock_irqrestore(&priv->lock, flags);
2621
2622 return rc;
2623}
2624
0edd5b44 2625static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2626{
2627 int rc = -1;
2628 int offset = 0;
2629 struct fw_chunk *chunk;
2630 dma_addr_t shared_phys;
2631 u8 *shared_virt;
2632
2633 IPW_DEBUG_TRACE("<< : \n");
2634 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2635
2636 if (!shared_virt)
2637 return -ENOMEM;
2638
2639 memmove(shared_virt, data, len);
2640
2641 /* Start the Dma */
2642 rc = ipw_fw_dma_enable(priv);
2643
2644 if (priv->sram_desc.last_cb_index > 0) {
2645 /* the DMA is already ready; this would be a bug */
2646 BUG();
2647 goto out;
2648 }
2649
2650 do {
2651 chunk = (struct fw_chunk *)(data + offset);
2652 offset += sizeof(struct fw_chunk);
2653 /* build DMA packet and queue up for sending */
bf79451e 2654 /* dma to chunk->address, the chunk->length bytes from data +
2655 * offset */
2656 /* Dma loading */
2657 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
a613bffd
JK
2658 le32_to_cpu(chunk->address),
2659 le32_to_cpu(chunk->length));
43f66a6c
JK
2660 if (rc) {
2661 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2662 goto out;
2663 }
bf79451e 2664
a613bffd 2665 offset += le32_to_cpu(chunk->length);
43f66a6c
JK
2666 } while (offset < len);
2667
0edd5b44 2668 /* Run the DMA and wait for the answer */
43f66a6c
JK
2669 rc = ipw_fw_dma_kick(priv);
2670 if (rc) {
2671 IPW_ERROR("dmaKick Failed\n");
2672 goto out;
2673 }
2674
2675 rc = ipw_fw_dma_wait(priv);
2676 if (rc) {
2677 IPW_ERROR("dmaWaitSync Failed\n");
2678 goto out;
2679 }
0edd5b44
JG
2680 out:
2681 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
43f66a6c
JK
2682 return rc;
2683}
2684
2685/* stop nic */
2686static int ipw_stop_nic(struct ipw_priv *priv)
2687{
2688 int rc = 0;
2689
0edd5b44 2690 /* stop */
43f66a6c 2691 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
bf79451e
JG
2692
2693 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2694 CX2_RESET_REG_MASTER_DISABLED, 500);
43f66a6c
JK
2695 if (rc < 0) {
2696 IPW_ERROR("wait for reg master disabled failed\n");
2697 return rc;
bf79451e 2698 }
43f66a6c
JK
2699
2700 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
bf79451e 2701
43f66a6c
JK
2702 return rc;
2703}
2704
2705static void ipw_start_nic(struct ipw_priv *priv)
2706{
2707 IPW_DEBUG_TRACE(">>\n");
2708
0edd5b44 2709 /* prvHwStartNic release ARC */
43f66a6c 2710 ipw_clear_bit(priv, CX2_RESET_REG,
bf79451e
JG
2711 CX2_RESET_REG_MASTER_DISABLED |
2712 CX2_RESET_REG_STOP_MASTER |
43f66a6c 2713 CBD_RESET_REG_PRINCETON_RESET);
bf79451e 2714
43f66a6c 2715 /* enable power management */
0edd5b44
JG
2716 ipw_set_bit(priv, CX2_GP_CNTRL_RW,
2717 CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
43f66a6c
JK
2718
2719 IPW_DEBUG_TRACE("<<\n");
2720}
bf79451e 2721
43f66a6c
JK
2722static int ipw_init_nic(struct ipw_priv *priv)
2723{
2724 int rc;
2725
2726 IPW_DEBUG_TRACE(">>\n");
bf79451e 2727 /* reset */
43f66a6c
JK
2728 /*prvHwInitNic */
2729 /* set "initialization complete" bit to move adapter to D0 state */
2730 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2731
2732 /* low-level PLL activation */
0edd5b44
JG
2733 ipw_write32(priv, CX2_READ_INT_REGISTER,
2734 CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
43f66a6c
JK
2735
2736 /* wait for clock stabilization */
bf79451e
JG
2737 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2738 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
0edd5b44 2739 if (rc < 0)
2740 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
2741
2742 /* assert SW reset */
2743 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2744
2745 udelay(10);
2746
2747 /* set "initialization complete" bit to move adapter to D0 state */
2748 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2749
2750 IPW_DEBUG_TRACE("<<\n");
2751 return 0;
2752}
2753
bf79451e 2754/* Call this function from process context, it will sleep in request_firmware.
43f66a6c
JK
2755 * Probe is an ok place to call this from.
2756 */
2757static int ipw_reset_nic(struct ipw_priv *priv)
2758{
2759 int rc = 0;
a613bffd 2760 unsigned long flags;
43f66a6c
JK
2761
2762 IPW_DEBUG_TRACE(">>\n");
bf79451e 2763
43f66a6c 2764 rc = ipw_init_nic(priv);
bf79451e 2765
a613bffd 2766 spin_lock_irqsave(&priv->lock, flags);
43f66a6c
JK
2767 /* Clear the 'host command active' bit... */
2768 priv->status &= ~STATUS_HCMD_ACTIVE;
2769 wake_up_interruptible(&priv->wait_command_queue);
a613bffd 2770 spin_unlock_irqrestore(&priv->lock, flags);
43f66a6c
JK
2771
2772 IPW_DEBUG_TRACE("<<\n");
2773 return rc;
bf79451e 2774}
43f66a6c 2775
bf79451e 2776static int ipw_get_fw(struct ipw_priv *priv,
43f66a6c
JK
2777 const struct firmware **fw, const char *name)
2778{
2779 struct fw_header *header;
2780 int rc;
2781
2782 /* ask firmware_class module to get the boot firmware off disk */
2783 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2784 if (rc < 0) {
2785 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2786 return rc;
bf79451e 2787 }
43f66a6c
JK
2788
2789 header = (struct fw_header *)(*fw)->data;
a613bffd 2790 if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) {
43f66a6c
JK
2791 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2792 name,
a613bffd
JK
2793 IPW_FW_MAJOR(le32_to_cpu(header->version)),
2794 IPW_FW_MAJOR_VERSION);
43f66a6c
JK
2795 return -EINVAL;
2796 }
2797
aaa4d308 2798 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
43f66a6c 2799 name,
a613bffd
JK
2800 IPW_FW_MAJOR(le32_to_cpu(header->version)),
2801 IPW_FW_MINOR(le32_to_cpu(header->version)),
43f66a6c
JK
2802 (*fw)->size - sizeof(struct fw_header));
2803 return 0;
2804}
2805
2806#define CX2_RX_BUF_SIZE (3000)
2807
2808static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2809 struct ipw_rx_queue *rxq)
2810{
2811 unsigned long flags;
2812 int i;
2813
2814 spin_lock_irqsave(&rxq->lock, flags);
2815
2816 INIT_LIST_HEAD(&rxq->rx_free);
2817 INIT_LIST_HEAD(&rxq->rx_used);
2818
2819 /* Fill the rx_used queue with _all_ of the Rx buffers */
2820 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2821 /* In the reset function, these buffers may have been allocated
2822 * to an SKB, so we need to unmap and free potential storage */
2823 if (rxq->pool[i].skb != NULL) {
2824 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
0edd5b44 2825 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
43f66a6c 2826 dev_kfree_skb(rxq->pool[i].skb);
a613bffd 2827 rxq->pool[i].skb = NULL;
43f66a6c
JK
2828 }
2829 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2830 }
bf79451e 2831
43f66a6c
JK
2832 /* Set us so that we have processed and used all buffers, but have
2833 * not restocked the Rx queue with fresh buffers */
2834 rxq->read = rxq->write = 0;
2835 rxq->processed = RX_QUEUE_SIZE - 1;
2836 rxq->free_count = 0;
2837 spin_unlock_irqrestore(&rxq->lock, flags);
2838}
2839
2840#ifdef CONFIG_PM
2841static int fw_loaded = 0;
2842static const struct firmware *bootfw = NULL;
2843static const struct firmware *firmware = NULL;
2844static const struct firmware *ucode = NULL;
2845#endif
2846
2847static int ipw_load(struct ipw_priv *priv)
2848{
2849#ifndef CONFIG_PM
2850 const struct firmware *bootfw = NULL;
2851 const struct firmware *firmware = NULL;
2852 const struct firmware *ucode = NULL;
2853#endif
2854 int rc = 0, retries = 3;
2855
2856#ifdef CONFIG_PM
2857 if (!fw_loaded) {
2858#endif
2859 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
bf79451e 2860 if (rc)
43f66a6c 2861 goto error;
bf79451e 2862
43f66a6c
JK
2863 switch (priv->ieee->iw_mode) {
2864 case IW_MODE_ADHOC:
bf79451e 2865 rc = ipw_get_fw(priv, &ucode,
43f66a6c 2866 IPW_FW_NAME("ibss_ucode"));
bf79451e 2867 if (rc)
43f66a6c 2868 goto error;
bf79451e 2869
43f66a6c
JK
2870 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2871 break;
bf79451e 2872
ea2b26e0 2873#ifdef CONFIG_IPW_MONITOR
43f66a6c 2874 case IW_MODE_MONITOR:
bf79451e 2875 rc = ipw_get_fw(priv, &ucode,
ea2b26e0 2876 IPW_FW_NAME("sniffer_ucode"));
bf79451e 2877 if (rc)
43f66a6c 2878 goto error;
bf79451e 2879
0edd5b44
JG
2880 rc = ipw_get_fw(priv, &firmware,
2881 IPW_FW_NAME("sniffer"));
43f66a6c
JK
2882 break;
2883#endif
2884 case IW_MODE_INFRA:
0edd5b44 2885 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
bf79451e 2886 if (rc)
43f66a6c 2887 goto error;
bf79451e 2888
43f66a6c
JK
2889 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2890 break;
bf79451e 2891
43f66a6c
JK
2892 default:
2893 rc = -EINVAL;
2894 }
2895
bf79451e 2896 if (rc)
43f66a6c
JK
2897 goto error;
2898
2899#ifdef CONFIG_PM
2900 fw_loaded = 1;
2901 }
2902#endif
2903
2904 if (!priv->rxq)
2905 priv->rxq = ipw_rx_queue_alloc(priv);
2906 else
2907 ipw_rx_queue_reset(priv, priv->rxq);
2908 if (!priv->rxq) {
2909 IPW_ERROR("Unable to initialize Rx queue\n");
2910 goto error;
2911 }
2912
0edd5b44 2913 retry:
43f66a6c
JK
2914 /* Ensure interrupts are disabled */
2915 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2916 priv->status &= ~STATUS_INT_ENABLED;
2917
2918 /* ack pending interrupts */
2919 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
bf79451e 2920
43f66a6c
JK
2921 ipw_stop_nic(priv);
2922
2923 rc = ipw_reset_nic(priv);
2924 if (rc) {
2925 IPW_ERROR("Unable to reset NIC\n");
2926 goto error;
2927 }
2928
bf79451e 2929 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
43f66a6c
JK
2930 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2931
2932 /* DMA the initial boot firmware into the device */
bf79451e 2933 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
43f66a6c
JK
2934 bootfw->size - sizeof(struct fw_header));
2935 if (rc < 0) {
2936 IPW_ERROR("Unable to load boot firmware\n");
2937 goto error;
2938 }
2939
2940 /* kick start the device */
2941 ipw_start_nic(priv);
2942
2943 /* wait for the device to finish its initial startup sequence */
2944 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2945 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
43f66a6c
JK
2946 if (rc < 0) {
2947 IPW_ERROR("device failed to boot initial fw image\n");
2948 goto error;
2949 }
2950 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2951
bf79451e 2952 /* ack fw init done interrupt */
43f66a6c
JK
2953 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2954
2955 /* DMA the ucode into the device */
bf79451e 2956 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
43f66a6c
JK
2957 ucode->size - sizeof(struct fw_header));
2958 if (rc < 0) {
2959 IPW_ERROR("Unable to load ucode\n");
2960 goto error;
2961 }
bf79451e 2962
43f66a6c
JK
2963 /* stop nic */
2964 ipw_stop_nic(priv);
2965
2966 /* DMA bss firmware into the device */
bf79451e
JG
2967 rc = ipw_load_firmware(priv, firmware->data +
2968 sizeof(struct fw_header),
43f66a6c 2969 firmware->size - sizeof(struct fw_header));
0edd5b44 2970 if (rc < 0) {
43f66a6c
JK
2971 IPW_ERROR("Unable to load firmware\n");
2972 goto error;
2973 }
2974
2975 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2976
2977 rc = ipw_queue_reset(priv);
2978 if (rc) {
2979 IPW_ERROR("Unable to initialize queues\n");
2980 goto error;
2981 }
2982
2983 /* Ensure interrupts are disabled */
2984 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
bf79451e 2985
43f66a6c
JK
2986 /* kick start the device */
2987 ipw_start_nic(priv);
2988
2989 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2990 if (retries > 0) {
2991 IPW_WARNING("Parity error. Retrying init.\n");
2992 retries--;
2993 goto retry;
2994 }
2995
2996 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2997 rc = -EIO;
2998 goto error;
2999 }
3000
3001 /* wait for the device */
bf79451e
JG
3002 rc = ipw_poll_bit(priv, CX2_INTA_RW,
3003 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
43f66a6c
JK
3004 if (rc < 0) {
3005 IPW_ERROR("device failed to start after 500ms\n");
3006 goto error;
3007 }
3008 IPW_DEBUG_INFO("device response after %dms\n", rc);
3009
3010 /* ack fw init done interrupt */
3011 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
3012
3013 /* read eeprom data and initialize the eeprom region of sram */
3014 priv->eeprom_delay = 1;
bf79451e 3015 ipw_eeprom_init_sram(priv);
43f66a6c
JK
3016
3017 /* enable interrupts */
3018 ipw_enable_interrupts(priv);
3019
3020 /* Ensure our queue has valid packets */
3021 ipw_rx_queue_replenish(priv);
3022
3023 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
3024
3025 /* ack pending interrupts */
3026 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
3027
3028#ifndef CONFIG_PM
3029 release_firmware(bootfw);
3030 release_firmware(ucode);
3031 release_firmware(firmware);
3032#endif
3033 return 0;
3034
0edd5b44 3035 error:
43f66a6c
JK
3036 if (priv->rxq) {
3037 ipw_rx_queue_free(priv, priv->rxq);
3038 priv->rxq = NULL;
3039 }
3040 ipw_tx_queue_free(priv);
3041 if (bootfw)
3042 release_firmware(bootfw);
3043 if (ucode)
3044 release_firmware(ucode);
3045 if (firmware)
3046 release_firmware(firmware);
3047#ifdef CONFIG_PM
3048 fw_loaded = 0;
3049 bootfw = ucode = firmware = NULL;
3050#endif
3051
3052 return rc;
3053}
3054
bf79451e 3055/**
43f66a6c
JK
3056 * DMA services
3057 *
3058 * Theory of operation
3059 *
3060 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3061 * 2 empty entries are always kept in the buffer to protect from overflow.
3062 *
3063 * For the Tx queue, there are low mark and high mark limits. If, after
3064 * queuing a packet for Tx, free space becomes < low mark, the Tx queue is
3065 * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space
3066 * becomes > high mark, the Tx queue is resumed.
3067 *
3068 * The IPW operates with six queues, one receive queue in the device's
3069 * sram, one transmit queue for sending commands to the device firmware,
bf79451e 3070 * and four transmit queues for data.
43f66a6c 3071 *
bf79451e 3072 * The four transmit queues allow for performing quality of service (qos)
43f66a6c 3073 * transmissions as per the 802.11 protocol. Currently Linux does not
bf79451e 3074 * provide a mechanism to the user for utilizing prioritized queues, so
43f66a6c
JK
3075 * we only utilize the first data transmit queue (queue1).
3076 */
3077
3078/**
3079 * Driver allocates buffers of this size for Rx
3080 */
3081
3082static inline int ipw_queue_space(const struct clx2_queue *q)
3083{
3084 int s = q->last_used - q->first_empty;
3085 if (s <= 0)
3086 s += q->n_bd;
3087 s -= 2; /* keep some reserve to not confuse empty and full situations */
3088 if (s < 0)
3089 s = 0;
3090 return s;
3091}
3092
3093static inline int ipw_queue_inc_wrap(int index, int n_bd)
3094{
3095 return (++index == n_bd) ? 0 : index;
3096}
3097
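/*
 * Worked example of the ring arithmetic above (values illustrative): with
 * n_bd = 8, last_used = 2 and first_empty = 6, the raw difference is -4,
 * which wraps to 4; dropping the two reserved slots leaves
 * ipw_queue_space() == 2.  Likewise ipw_queue_inc_wrap(7, 8) wraps back
 * to index 0.
 */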
3098/**
3099 * Initialize common DMA queue structure
bf79451e 3100 *
3101 * @param q queue to init
3102 * @param count Number of BD's to allocate. Should be power of 2
3103 * @param read_register Address for 'read' register
3104 * (not offset within BAR, full address)
3105 * @param write_register Address for 'write' register
3106 * (not offset within BAR, full address)
3107 * @param base_register Address for 'base' register
3108 * (not offset within BAR, full address)
3109 * @param size Address for 'size' register
3110 * (not offset within BAR, full address)
3111 */
bf79451e 3112static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
0edd5b44 3113 int count, u32 read, u32 write, u32 base, u32 size)
3114{
3115 q->n_bd = count;
3116
3117 q->low_mark = q->n_bd / 4;
3118 if (q->low_mark < 4)
3119 q->low_mark = 4;
3120
3121 q->high_mark = q->n_bd / 8;
3122 if (q->high_mark < 2)
3123 q->high_mark = 2;
3124
3125 q->first_empty = q->last_used = 0;
3126 q->reg_r = read;
3127 q->reg_w = write;
3128
3129 ipw_write32(priv, base, q->dma_addr);
3130 ipw_write32(priv, size, count);
3131 ipw_write32(priv, read, 0);
3132 ipw_write32(priv, write, 0);
3133
3134 _ipw_read32(priv, 0x90);
3135}
3136
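/*
 * For instance (count illustrative): a queue initialized with count = 64
 * gets low_mark = 16 and high_mark = 8, matching the scheme described in
 * the "DMA services" comment above -- transmit is throttled once fewer
 * than 16 slots remain free and resumed when reclaim pushes the free
 * count back above 8.
 */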
bf79451e 3137static int ipw_queue_tx_init(struct ipw_priv *priv,
43f66a6c 3138 struct clx2_tx_queue *q,
0edd5b44 3139 int count, u32 read, u32 write, u32 base, u32 size)
3140{
3141 struct pci_dev *dev = priv->pci_dev;
3142
3143 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3144 if (!q->txb) {
3145 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3146 return -ENOMEM;
3147 }
3148
3149 q->bd =
3150 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
43f66a6c 3151 if (!q->bd) {
aaa4d308 3152 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
0edd5b44 3153 sizeof(q->bd[0]) * count);
3154 kfree(q->txb);
3155 q->txb = NULL;
3156 return -ENOMEM;
3157 }
3158
3159 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3160 return 0;
3161}
3162
3163/**
3164 * Free one TFD, the one at index [txq->q.last_used].
3165 * Do NOT advance any indexes
bf79451e 3166 *
3167 * @param dev
3168 * @param txq
3169 */
3170static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3171 struct clx2_tx_queue *txq)
3172{
3173 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3174 struct pci_dev *dev = priv->pci_dev;
3175 int i;
bf79451e 3176
43f66a6c
JK
3177 /* classify bd */
3178 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3179 /* nothing to cleanup after for host commands */
3180 return;
3181
3182 /* sanity check */
a613bffd
JK
3183 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3184 IPW_ERROR("Too many chunks: %i\n",
3185 le32_to_cpu(bd->u.data.num_chunks));
43f66a6c
JK
3186 /** @todo issue fatal error, it is quite serious situation */
3187 return;
3188 }
3189
3190 /* unmap chunks if any */
a613bffd
JK
3191 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3192 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3193 le16_to_cpu(bd->u.data.chunk_len[i]),
3194 PCI_DMA_TODEVICE);
43f66a6c
JK
3195 if (txq->txb[txq->q.last_used]) {
3196 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3197 txq->txb[txq->q.last_used] = NULL;
3198 }
3199 }
3200}
3201
3202/**
3203 * Deallocate DMA queue.
bf79451e 3204 *
43f66a6c
JK
3205 * Empty queue by removing and destroying all BD's.
3206 * Free all buffers.
bf79451e 3207 *
43f66a6c
JK
3208 * @param dev
3209 * @param q
3210 */
0edd5b44 3211static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
43f66a6c
JK
3212{
3213 struct clx2_queue *q = &txq->q;
3214 struct pci_dev *dev = priv->pci_dev;
3215
bf79451e
JG
3216 if (q->n_bd == 0)
3217 return;
43f66a6c
JK
3218
3219 /* first, empty all BD's */
3220 for (; q->first_empty != q->last_used;
3221 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3222 ipw_queue_tx_free_tfd(priv, txq);
3223 }
bf79451e 3224
43f66a6c 3225 /* free buffers belonging to queue itself */
0edd5b44 3226 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
43f66a6c
JK
3227 q->dma_addr);
3228 kfree(txq->txb);
3229
3230 /* 0 fill whole structure */
3231 memset(txq, 0, sizeof(*txq));
3232}
3233
43f66a6c
JK
3234/**
3235 * Destroy all DMA queues and structures
bf79451e 3236 *
43f66a6c
JK
3237 * @param priv
3238 */
3239static void ipw_tx_queue_free(struct ipw_priv *priv)
3240{
3241 /* Tx CMD queue */
3242 ipw_queue_tx_free(priv, &priv->txq_cmd);
3243
3244 /* Tx queues */
3245 ipw_queue_tx_free(priv, &priv->txq[0]);
3246 ipw_queue_tx_free(priv, &priv->txq[1]);
3247 ipw_queue_tx_free(priv, &priv->txq[2]);
3248 ipw_queue_tx_free(priv, &priv->txq[3]);
3249}
3250
3251static inline void __maybe_wake_tx(struct ipw_priv *priv)
3252{
3253 if (netif_running(priv->net_dev)) {
3254 switch (priv->port_type) {
3255 case DCR_TYPE_MU_BSS:
3256 case DCR_TYPE_MU_IBSS:
a613bffd 3257 if (!(priv->status & STATUS_ASSOCIATED))
43f66a6c 3258 return;
43f66a6c
JK
3259 }
3260 netif_wake_queue(priv->net_dev);
3261 }
3262
3263}
3264
0edd5b44 3265static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
43f66a6c
JK
3266{
3267 /* First 3 bytes are manufacturer */
3268 bssid[0] = priv->mac_addr[0];
3269 bssid[1] = priv->mac_addr[1];
3270 bssid[2] = priv->mac_addr[2];
3271
3272 /* Last bytes are random */
0edd5b44 3273 get_random_bytes(&bssid[3], ETH_ALEN - 3);
43f66a6c 3274
0edd5b44
JG
3275 bssid[0] &= 0xfe; /* clear multicast bit */
3276 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3277}
3278
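/*
 * Example (MAC purely illustrative): an adapter at 00:0e:35:aa:bb:cc would
 * produce an ad-hoc BSSID of the form 02:0e:35:xx:yy:zz -- the manufacturer
 * bytes are copied, the last three bytes are randomized, and the fixups
 * above clear the multicast bit and set the locally administered bit in
 * the first byte.
 */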
0edd5b44 3279static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3280{
3281 struct ipw_station_entry entry;
3282 int i;
3283
3284 for (i = 0; i < priv->num_stations; i++) {
3285 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3286 /* Another node is active in network */
3287 priv->missed_adhoc_beacons = 0;
3288 if (!(priv->config & CFG_STATIC_CHANNEL))
3289 /* when other nodes drop out, we drop out */
3290 priv->config &= ~CFG_ADHOC_PERSIST;
3291
3292 return i;
3293 }
3294 }
3295
3296 if (i == MAX_STATIONS)
3297 return IPW_INVALID_STATION;
3298
3299 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3300
3301 entry.reserved = 0;
3302 entry.support_mode = 0;
3303 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3304 memcpy(priv->stations[i], bssid, ETH_ALEN);
3305 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
0edd5b44 3306 &entry, sizeof(entry));
43f66a6c
JK
3307 priv->num_stations++;
3308
3309 return i;
3310}
3311
0edd5b44 3312static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
43f66a6c
JK
3313{
3314 int i;
3315
bf79451e
JG
3316 for (i = 0; i < priv->num_stations; i++)
3317 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
43f66a6c
JK
3318 return i;
3319
3320 return IPW_INVALID_STATION;
3321}
3322
3323static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3324{
3325 int err;
3326
3327 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
3328 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3329 return;
3330 }
3331
3332 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3333 "on channel %d.\n",
bf79451e 3334 MAC_ARG(priv->assoc_request.bssid),
3335 priv->assoc_request.channel);
3336
3337 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3338 priv->status |= STATUS_DISASSOCIATING;
3339
3340 if (quiet)
3341 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3342 else
3343 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3344 err = ipw_send_associate(priv, &priv->assoc_request);
3345 if (err) {
3346 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3347 "failed.\n");
3348 return;
3349 }
3350
3351}
3352
3353static void ipw_disassociate(void *data)
3354{
3355 ipw_send_disassociate(data, 0);
3356}
3357
43f66a6c
JK
3358struct ipw_status_code {
3359 u16 status;
3360 const char *reason;
3361};
3362
3363static const struct ipw_status_code ipw_status_codes[] = {
3364 {0x00, "Successful"},
3365 {0x01, "Unspecified failure"},
3366 {0x0A, "Cannot support all requested capabilities in the "
3367 "Capability information field"},
3368 {0x0B, "Reassociation denied due to inability to confirm that "
3369 "association exists"},
3370 {0x0C, "Association denied due to reason outside the scope of this "
3371 "standard"},
0edd5b44
JG
3372 {0x0D,
3373 "Responding station does not support the specified authentication "
43f66a6c 3374 "algorithm"},
0edd5b44
JG
3375 {0x0E,
3376 "Received an Authentication frame with authentication sequence "
43f66a6c
JK
3377 "transaction sequence number out of expected sequence"},
3378 {0x0F, "Authentication rejected because of challenge failure"},
3379 {0x10, "Authentication rejected due to timeout waiting for next "
3380 "frame in sequence"},
3381 {0x11, "Association denied because AP is unable to handle additional "
3382 "associated stations"},
0edd5b44
JG
3383 {0x12,
3384 "Association denied due to requesting station not supporting all "
43f66a6c 3385 "of the datarates in the BSSBasicServiceSet Parameter"},
0edd5b44
JG
3386 {0x13,
3387 "Association denied due to requesting station not supporting "
43f66a6c 3388 "short preamble operation"},
0edd5b44
JG
3389 {0x14,
3390 "Association denied due to requesting station not supporting "
43f66a6c 3391 "PBCC encoding"},
0edd5b44
JG
3392 {0x15,
3393 "Association denied due to requesting station not supporting "
43f66a6c 3394 "channel agility"},
0edd5b44
JG
3395 {0x19,
3396 "Association denied due to requesting station not supporting "
43f66a6c 3397 "short slot operation"},
0edd5b44
JG
3398 {0x1A,
3399 "Association denied due to requesting station not supporting "
43f66a6c
JK
3400 "DSSS-OFDM operation"},
3401 {0x28, "Invalid Information Element"},
3402 {0x29, "Group Cipher is not valid"},
3403 {0x2A, "Pairwise Cipher is not valid"},
3404 {0x2B, "AKMP is not valid"},
3405 {0x2C, "Unsupported RSN IE version"},
3406 {0x2D, "Invalid RSN IE Capabilities"},
3407 {0x2E, "Cipher suite is rejected per security policy"},
3408};
3409
3410#ifdef CONFIG_IPW_DEBUG
bf79451e 3411static const char *ipw_get_status_code(u16 status)
43f66a6c
JK
3412{
3413 int i;
bf79451e 3414 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
ea2b26e0 3415 if (ipw_status_codes[i].status == (status & 0xff))
43f66a6c
JK
3416 return ipw_status_codes[i].reason;
3417 return "Unknown status value.";
3418}
3419#endif
3420
3421static void inline average_init(struct average *avg)
3422{
3423 memset(avg, 0, sizeof(*avg));
3424}
3425
3426static void inline average_add(struct average *avg, s16 val)
3427{
3428 avg->sum -= avg->entries[avg->pos];
3429 avg->sum += val;
3430 avg->entries[avg->pos++] = val;
3431 if (unlikely(avg->pos == AVG_ENTRIES)) {
3432 avg->init = 1;
3433 avg->pos = 0;
3434 }
3435}
3436
3437static s16 inline average_value(struct average *avg)
3438{
3439 if (!unlikely(avg->init)) {
3440 if (avg->pos)
3441 return avg->sum / avg->pos;
3442 return 0;
3443 }
3444
3445 return avg->sum / AVG_ENTRIES;
3446}
3447
3448static void ipw_reset_stats(struct ipw_priv *priv)
3449{
3450 u32 len = sizeof(u32);
3451
3452 priv->quality = 0;
3453
3454 average_init(&priv->average_missed_beacons);
3455 average_init(&priv->average_rssi);
3456 average_init(&priv->average_noise);
3457
3458 priv->last_rate = 0;
3459 priv->last_missed_beacons = 0;
3460 priv->last_rx_packets = 0;
3461 priv->last_tx_packets = 0;
3462 priv->last_tx_failures = 0;
bf79451e 3463
43f66a6c
JK
3464 /* Firmware managed, reset only when NIC is restarted, so we have to
3465 * normalize on the current value */
bf79451e 3466 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
43f66a6c 3467 &priv->last_rx_err, &len);
bf79451e 3468 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
43f66a6c
JK
3469 &priv->last_tx_failures, &len);
3470
3471 /* Driver managed, reset with each association */
3472 priv->missed_adhoc_beacons = 0;
3473 priv->missed_beacons = 0;
3474 priv->tx_packets = 0;
3475 priv->rx_packets = 0;
3476
3477}
3478
43f66a6c
JK
3479static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3480{
3481 u32 i = 0x80000000;
3482 u32 mask = priv->rates_mask;
3483 /* If currently associated in B mode, restrict the maximum
3484 * rate match to B rates */
3485 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3486 mask &= IEEE80211_CCK_RATES_MASK;
3487
3488 /* TODO: Verify that the rate is supported by the current rates
3489 * list. */
3490
0edd5b44
JG
3491 while (i && !(mask & i))
3492 i >>= 1;
43f66a6c 3493 switch (i) {
ea2b26e0
JK
3494 case IEEE80211_CCK_RATE_1MB_MASK:
3495 return 1000000;
3496 case IEEE80211_CCK_RATE_2MB_MASK:
3497 return 2000000;
3498 case IEEE80211_CCK_RATE_5MB_MASK:
3499 return 5500000;
3500 case IEEE80211_OFDM_RATE_6MB_MASK:
3501 return 6000000;
3502 case IEEE80211_OFDM_RATE_9MB_MASK:
3503 return 9000000;
3504 case IEEE80211_CCK_RATE_11MB_MASK:
3505 return 11000000;
3506 case IEEE80211_OFDM_RATE_12MB_MASK:
3507 return 12000000;
3508 case IEEE80211_OFDM_RATE_18MB_MASK:
3509 return 18000000;
3510 case IEEE80211_OFDM_RATE_24MB_MASK:
3511 return 24000000;
3512 case IEEE80211_OFDM_RATE_36MB_MASK:
3513 return 36000000;
3514 case IEEE80211_OFDM_RATE_48MB_MASK:
3515 return 48000000;
3516 case IEEE80211_OFDM_RATE_54MB_MASK:
3517 return 54000000;
43f66a6c
JK
3518 }
3519
bf79451e 3520 if (priv->ieee->mode == IEEE_B)
43f66a6c
JK
3521 return 11000000;
3522 else
3523 return 54000000;
3524}
3525
3526static u32 ipw_get_current_rate(struct ipw_priv *priv)
3527{
3528 u32 rate, len = sizeof(rate);
3529 int err;
3530
bf79451e 3531 if (!(priv->status & STATUS_ASSOCIATED))
43f66a6c
JK
3532 return 0;
3533
3534 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
bf79451e 3535 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
43f66a6c
JK
3536 &len);
3537 if (err) {
3538 IPW_DEBUG_INFO("failed querying ordinals.\n");
3539 return 0;
3540 }
bf79451e 3541 } else
43f66a6c
JK
3542 return ipw_get_max_rate(priv);
3543
3544 switch (rate) {
ea2b26e0
JK
3545 case IPW_TX_RATE_1MB:
3546 return 1000000;
3547 case IPW_TX_RATE_2MB:
3548 return 2000000;
3549 case IPW_TX_RATE_5MB:
3550 return 5500000;
3551 case IPW_TX_RATE_6MB:
3552 return 6000000;
3553 case IPW_TX_RATE_9MB:
3554 return 9000000;
3555 case IPW_TX_RATE_11MB:
3556 return 11000000;
3557 case IPW_TX_RATE_12MB:
3558 return 12000000;
3559 case IPW_TX_RATE_18MB:
3560 return 18000000;
3561 case IPW_TX_RATE_24MB:
3562 return 24000000;
3563 case IPW_TX_RATE_36MB:
3564 return 36000000;
3565 case IPW_TX_RATE_48MB:
3566 return 48000000;
3567 case IPW_TX_RATE_54MB:
3568 return 54000000;
43f66a6c
JK
3569 }
3570
3571 return 0;
3572}
3573
ea2b26e0 3574#define PERFECT_RSSI (-20)
43f66a6c
JK
3575#define WORST_RSSI (-85)
3576#define IPW_STATS_INTERVAL (2 * HZ)
3577static void ipw_gather_stats(struct ipw_priv *priv)
3578{
3579 u32 rx_err, rx_err_delta, rx_packets_delta;
3580 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3581 u32 missed_beacons_percent, missed_beacons_delta;
3582 u32 quality = 0;
3583 u32 len = sizeof(u32);
3584 s16 rssi;
bf79451e 3585 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
0edd5b44 3586 rate_quality;
ea2b26e0 3587 u32 max_rate;
43f66a6c
JK
3588
3589 if (!(priv->status & STATUS_ASSOCIATED)) {
3590 priv->quality = 0;
3591 return;
3592 }
3593
3594 /* Update the statistics */
bf79451e 3595 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
43f66a6c 3596 &priv->missed_beacons, &len);
0edd5b44 3597 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
43f66a6c
JK
3598 priv->last_missed_beacons = priv->missed_beacons;
3599 if (priv->assoc_request.beacon_interval) {
3600 missed_beacons_percent = missed_beacons_delta *
0edd5b44
JG
3601 (HZ * priv->assoc_request.beacon_interval) /
3602 (IPW_STATS_INTERVAL * 10);
43f66a6c
JK
3603 } else {
3604 missed_beacons_percent = 0;
3605 }
3606 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3607
3608 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3609 rx_err_delta = rx_err - priv->last_rx_err;
3610 priv->last_rx_err = rx_err;
3611
3612 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3613 tx_failures_delta = tx_failures - priv->last_tx_failures;
3614 priv->last_tx_failures = tx_failures;
3615
3616 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3617 priv->last_rx_packets = priv->rx_packets;
3618
3619 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3620 priv->last_tx_packets = priv->tx_packets;
3621
3622 /* Calculate quality based on the following:
bf79451e 3623 *
43f66a6c
JK
3624 * Missed beacon: 100% = 0, 0% = 70% missed
3625 * Rate: 60% = 1Mbs, 100% = Max
3626 * Rx and Tx errors represent a straight % of total Rx/Tx
 3627	 * RSSI: 100% = > PERFECT_RSSI (-20), 0% = < WORST_RSSI (-85)
3628 * Rx errors: 100% = 0, 0% = 50% missed
bf79451e 3629 *
43f66a6c
JK
3630 * The lowest computed quality is used.
3631 *
3632 */
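	/* Worked example of the missed beacon math above (illustrative only,
	 * assuming the common 100 TU beacon interval): with
	 * IPW_STATS_INTERVAL = 2 * HZ, missed_beacons_percent reduces to
	 * missed_beacons_delta * 100 / 20, i.e. each beacon missed in the
	 * two second window costs roughly 5% of beacon_quality before the
	 * BEACON_THRESHOLD rescaling below. */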
3633#define BEACON_THRESHOLD 5
3634 beacon_quality = 100 - missed_beacons_percent;
3635 if (beacon_quality < BEACON_THRESHOLD)
3636 beacon_quality = 0;
3637 else
bf79451e 3638 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
0edd5b44 3639 (100 - BEACON_THRESHOLD);
bf79451e 3640 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
43f66a6c 3641 beacon_quality, missed_beacons_percent);
bf79451e 3642
43f66a6c 3643 priv->last_rate = ipw_get_current_rate(priv);
ea2b26e0
JK
3644 max_rate = ipw_get_max_rate(priv);
3645 rate_quality = priv->last_rate * 40 / max_rate + 60;
43f66a6c
JK
3646 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3647 rate_quality, priv->last_rate / 1000000);
bf79451e 3648
0edd5b44 3649 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
bf79451e 3650 rx_quality = 100 - (rx_err_delta * 100) /
0edd5b44 3651 (rx_packets_delta + rx_err_delta);
43f66a6c
JK
3652 else
3653 rx_quality = 100;
3654 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3655 rx_quality, rx_err_delta, rx_packets_delta);
bf79451e 3656
0edd5b44 3657 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
bf79451e 3658 tx_quality = 100 - (tx_failures_delta * 100) /
0edd5b44 3659 (tx_packets_delta + tx_failures_delta);
43f66a6c
JK
3660 else
3661 tx_quality = 100;
3662 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3663 tx_quality, tx_failures_delta, tx_packets_delta);
bf79451e 3664
43f66a6c
JK
3665 rssi = average_value(&priv->average_rssi);
3666 if (rssi > PERFECT_RSSI)
3667 signal_quality = 100;
3668 else if (rssi < WORST_RSSI)
3669 signal_quality = 0;
ea2b26e0
JK
 3670	else		/* qual = (100a^2 - 15ab - 62b^2) / a^2; a = PERFECT_RSSI - WORST_RSSI, b = PERFECT_RSSI - rssi */
3671 signal_quality =
3672 (100 *
3673 (PERFECT_RSSI - WORST_RSSI) *
3674 (PERFECT_RSSI - WORST_RSSI) -
3675 (PERFECT_RSSI - rssi) *
3676 (15 * (PERFECT_RSSI - WORST_RSSI) +
3677 62 * (PERFECT_RSSI - rssi))) /
3678 ((PERFECT_RSSI - WORST_RSSI) * (PERFECT_RSSI - WORST_RSSI));
3679
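	/* Illustrative numbers only: for rssi = -60 dBm, a = PERFECT_RSSI -
	 * WORST_RSSI = 65 and b = PERFECT_RSSI - rssi = 40, giving
	 * signal_quality = (100*65*65 - 40*(15*65 + 62*40)) / (65*65)
	 * = (422500 - 138200) / 4225 = 67 with integer division. */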
43f66a6c
JK
3680 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3681 signal_quality, rssi);
bf79451e
JG
3682
3683 quality = min(beacon_quality,
43f66a6c
JK
3684 min(rate_quality,
3685 min(tx_quality, min(rx_quality, signal_quality))));
3686 if (quality == beacon_quality)
0edd5b44
JG
3687 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
3688 quality);
43f66a6c 3689 if (quality == rate_quality)
0edd5b44
JG
3690 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
3691 quality);
43f66a6c 3692 if (quality == tx_quality)
0edd5b44
JG
3693 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
3694 quality);
43f66a6c 3695 if (quality == rx_quality)
0edd5b44
JG
3696 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
3697 quality);
43f66a6c 3698 if (quality == signal_quality)
0edd5b44
JG
3699 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
3700 quality);
43f66a6c
JK
3701
3702 priv->quality = quality;
bf79451e
JG
3703
3704 queue_delayed_work(priv->workqueue, &priv->gather_stats,
43f66a6c
JK
3705 IPW_STATS_INTERVAL);
3706}
3707
ea2b26e0
JK
3708static inline void ipw_handle_missed_beacon(struct ipw_priv *priv,
3709 int missed_count)
3710{
3711 priv->notif_missed_beacons = missed_count;
3712
3713 if (missed_count > priv->missed_beacon_threshold &&
3714 priv->status & STATUS_ASSOCIATED) {
3715 /* If associated and we've hit the missed
3716 * beacon threshold, disassociate, turn
3717 * off roaming, and abort any active scans */
3718 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3719 IPW_DL_STATE,
3720 "Missed beacon: %d - disassociate\n", missed_count);
3721 priv->status &= ~STATUS_ROAMING;
a613bffd
JK
3722 if (priv->status & STATUS_SCANNING) {
3723 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3724 IPW_DL_STATE,
3725 "Aborting scan with missed beacon.\n");
ea2b26e0 3726 queue_work(priv->workqueue, &priv->abort_scan);
a613bffd
JK
3727 }
3728
ea2b26e0
JK
3729 queue_work(priv->workqueue, &priv->disassociate);
3730 return;
3731 }
3732
3733 if (priv->status & STATUS_ROAMING) {
3734 /* If we are currently roaming, then just
3735 * print a debug statement... */
3736 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3737 "Missed beacon: %d - roam in progress\n",
3738 missed_count);
3739 return;
3740 }
3741
3742 if (missed_count > priv->roaming_threshold) {
3743 /* If we are not already roaming, set the ROAM
3744 * bit in the status and kick off a scan */
3745 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3746 "Missed beacon: %d - initiate "
3747 "roaming\n", missed_count);
3748 if (!(priv->status & STATUS_ROAMING)) {
3749 priv->status |= STATUS_ROAMING;
3750 if (!(priv->status & STATUS_SCANNING))
3751 queue_work(priv->workqueue,
3752 &priv->request_scan);
3753 }
3754 return;
3755 }
3756
3757 if (priv->status & STATUS_SCANNING) {
3758 /* Stop scan to keep fw from getting
3759 * stuck (only if we aren't roaming --
3760 * otherwise we'll never scan more than 2 or 3
3761 * channels..) */
a613bffd
JK
3762 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3763 IPW_DL_STATE, "Aborting scan with missed beacon.\n");
ea2b26e0
JK
3764 queue_work(priv->workqueue, &priv->abort_scan);
3765 }
3766
3767 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
3768
3769}
3770
43f66a6c
JK
3771/**
3772 * Handle host notification packet.
3773 * Called from interrupt routine
3774 */
0edd5b44 3775static inline void ipw_rx_notification(struct ipw_priv *priv,
43f66a6c
JK
3776 struct ipw_rx_notification *notif)
3777{
a613bffd
JK
3778 notif->size = le16_to_cpu(notif->size);
3779
0edd5b44 3780 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
bf79451e 3781
43f66a6c 3782 switch (notif->subtype) {
0edd5b44
JG
3783 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
3784 struct notif_association *assoc = &notif->u.assoc;
3785
3786 switch (assoc->state) {
3787 case CMAS_ASSOCIATED:{
3788 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3789 IPW_DL_ASSOC,
3790 "associated: '%s' " MAC_FMT
3791 " \n",
3792 escape_essid(priv->essid,
3793 priv->essid_len),
3794 MAC_ARG(priv->bssid));
3795
3796 switch (priv->ieee->iw_mode) {
3797 case IW_MODE_INFRA:
3798 memcpy(priv->ieee->bssid,
3799 priv->bssid, ETH_ALEN);
3800 break;
3801
3802 case IW_MODE_ADHOC:
3803 memcpy(priv->ieee->bssid,
3804 priv->bssid, ETH_ALEN);
3805
3806 /* clear out the station table */
3807 priv->num_stations = 0;
3808
3809 IPW_DEBUG_ASSOC
3810 ("queueing adhoc check\n");
3811 queue_delayed_work(priv->
3812 workqueue,
3813 &priv->
3814 adhoc_check,
3815 priv->
3816 assoc_request.
3817 beacon_interval);
3818 break;
3819 }
3820
3821 priv->status &= ~STATUS_ASSOCIATING;
3822 priv->status |= STATUS_ASSOCIATED;
3823
a613bffd 3824 schedule_work(&priv->link_up);
0edd5b44 3825
0edd5b44
JG
3826 break;
3827 }
bf79451e 3828
0edd5b44
JG
3829 case CMAS_AUTHENTICATED:{
3830 if (priv->
3831 status & (STATUS_ASSOCIATED |
3832 STATUS_AUTH)) {
43f66a6c 3833#ifdef CONFIG_IPW_DEBUG
0edd5b44
JG
3834 struct notif_authenticate *auth
3835 = &notif->u.auth;
3836 IPW_DEBUG(IPW_DL_NOTIF |
3837 IPW_DL_STATE |
3838 IPW_DL_ASSOC,
3839 "deauthenticated: '%s' "
3840 MAC_FMT
3841 ": (0x%04X) - %s \n",
3842 escape_essid(priv->
3843 essid,
3844 priv->
3845 essid_len),
3846 MAC_ARG(priv->bssid),
3847 ntohs(auth->status),
3848 ipw_get_status_code
3849 (ntohs
3850 (auth->status)));
43f66a6c
JK
3851#endif
3852
0edd5b44
JG
3853 priv->status &=
3854 ~(STATUS_ASSOCIATING |
3855 STATUS_AUTH |
3856 STATUS_ASSOCIATED);
3857
a613bffd 3858 schedule_work(&priv->link_down);
0edd5b44
JG
3859 break;
3860 }
3861
3862 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3863 IPW_DL_ASSOC,
3864 "authenticated: '%s' " MAC_FMT
3865 "\n",
3866 escape_essid(priv->essid,
3867 priv->essid_len),
3868 MAC_ARG(priv->bssid));
3869 break;
3870 }
3871
3872 case CMAS_INIT:{
ea2b26e0
JK
3873 if (priv->status & STATUS_AUTH) {
3874 struct
3875 ieee80211_assoc_response
3876 *resp;
3877 resp =
3878 (struct
3879 ieee80211_assoc_response
3880 *)&notif->u.raw;
3881 IPW_DEBUG(IPW_DL_NOTIF |
3882 IPW_DL_STATE |
3883 IPW_DL_ASSOC,
3884 "association failed (0x%04X): %s\n",
3885 ntohs(resp->status),
3886 ipw_get_status_code
3887 (ntohs
3888 (resp->status)));
3889 }
3890
0edd5b44
JG
3891 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3892 IPW_DL_ASSOC,
3893 "disassociated: '%s' " MAC_FMT
3894 " \n",
3895 escape_essid(priv->essid,
3896 priv->essid_len),
3897 MAC_ARG(priv->bssid));
3898
3899 priv->status &=
3900 ~(STATUS_DISASSOCIATING |
3901 STATUS_ASSOCIATING |
3902 STATUS_ASSOCIATED | STATUS_AUTH);
3903
a613bffd 3904 schedule_work(&priv->link_down);
0edd5b44 3905
0edd5b44
JG
3906 break;
3907 }
43f66a6c 3908
0edd5b44
JG
3909 default:
3910 IPW_ERROR("assoc: unknown (%d)\n",
3911 assoc->state);
43f66a6c 3912 break;
bf79451e 3913 }
43f66a6c 3914
43f66a6c
JK
3915 break;
3916 }
bf79451e 3917
0edd5b44
JG
3918 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
3919 struct notif_authenticate *auth = &notif->u.auth;
3920 switch (auth->state) {
3921 case CMAS_AUTHENTICATED:
3922 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3923 "authenticated: '%s' " MAC_FMT " \n",
3924 escape_essid(priv->essid,
3925 priv->essid_len),
3926 MAC_ARG(priv->bssid));
3927 priv->status |= STATUS_AUTH;
3928 break;
43f66a6c 3929
0edd5b44
JG
3930 case CMAS_INIT:
3931 if (priv->status & STATUS_AUTH) {
3932 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3933 IPW_DL_ASSOC,
3934 "authentication failed (0x%04X): %s\n",
3935 ntohs(auth->status),
3936 ipw_get_status_code(ntohs
3937 (auth->
3938 status)));
3939 }
3940 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3941 IPW_DL_ASSOC,
3942 "deauthenticated: '%s' " MAC_FMT "\n",
3943 escape_essid(priv->essid,
3944 priv->essid_len),
3945 MAC_ARG(priv->bssid));
bf79451e 3946
0edd5b44
JG
3947 priv->status &= ~(STATUS_ASSOCIATING |
3948 STATUS_AUTH |
3949 STATUS_ASSOCIATED);
43f66a6c 3950
a613bffd 3951 schedule_work(&priv->link_down);
0edd5b44 3952 break;
43f66a6c 3953
0edd5b44
JG
3954 case CMAS_TX_AUTH_SEQ_1:
3955 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3956 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
3957 break;
3958 case CMAS_RX_AUTH_SEQ_2:
3959 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3960 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
3961 break;
3962 case CMAS_AUTH_SEQ_1_PASS:
3963 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3964 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
3965 break;
3966 case CMAS_AUTH_SEQ_1_FAIL:
3967 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3968 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
3969 break;
3970 case CMAS_TX_AUTH_SEQ_3:
3971 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3972 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
3973 break;
3974 case CMAS_RX_AUTH_SEQ_4:
3975 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3976 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
3977 break;
3978 case CMAS_AUTH_SEQ_2_PASS:
3979 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3980 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
3981 break;
3982 case CMAS_AUTH_SEQ_2_FAIL:
3983 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3984 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
3985 break;
3986 case CMAS_TX_ASSOC:
3987 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3988 IPW_DL_ASSOC, "TX_ASSOC\n");
3989 break;
3990 case CMAS_RX_ASSOC_RESP:
3991 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3992 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
3993 break;
3994 case CMAS_ASSOCIATED:
3995 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3996 IPW_DL_ASSOC, "ASSOCIATED\n");
3997 break;
3998 default:
3999 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4000 auth->state);
4001 break;
43f66a6c 4002 }
43f66a6c
JK
4003 break;
4004 }
4005
0edd5b44
JG
4006 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4007 struct notif_channel_result *x =
4008 &notif->u.channel_result;
43f66a6c 4009
0edd5b44
JG
4010 if (notif->size == sizeof(*x)) {
4011 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4012 x->channel_num);
4013 } else {
4014 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4015 "(should be %zd)\n",
4016 notif->size, sizeof(*x));
bf79451e 4017 }
43f66a6c
JK
4018 break;
4019 }
43f66a6c 4020
0edd5b44
JG
4021 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4022 struct notif_scan_complete *x = &notif->u.scan_complete;
4023 if (notif->size == sizeof(*x)) {
4024 IPW_DEBUG_SCAN
4025 ("Scan completed: type %d, %d channels, "
4026 "%d status\n", x->scan_type,
4027 x->num_channels, x->status);
4028 } else {
4029 IPW_ERROR("Scan completed of wrong size %d "
4030 "(should be %zd)\n",
4031 notif->size, sizeof(*x));
4032 }
43f66a6c 4033
0edd5b44
JG
4034 priv->status &=
4035 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4036
4037 cancel_delayed_work(&priv->scan_check);
4038
4039 if (!(priv->status & (STATUS_ASSOCIATED |
4040 STATUS_ASSOCIATING |
4041 STATUS_ROAMING |
4042 STATUS_DISASSOCIATING)))
4043 queue_work(priv->workqueue, &priv->associate);
4044 else if (priv->status & STATUS_ROAMING) {
4045 /* If a scan completed and we are in roam mode, then
4046 * the scan that completed was the one requested as a
4047 * result of entering roam... so, schedule the
4048 * roam work */
4049 queue_work(priv->workqueue, &priv->roam);
4050 } else if (priv->status & STATUS_SCAN_PENDING)
4051 queue_work(priv->workqueue,
4052 &priv->request_scan);
a613bffd
JK
4053 else if (priv->config & CFG_BACKGROUND_SCAN
4054 && priv->status & STATUS_ASSOCIATED)
4055 queue_delayed_work(priv->workqueue,
4056 &priv->request_scan, HZ);
43f66a6c 4057
0edd5b44
JG
4058 priv->ieee->scans++;
4059 break;
43f66a6c 4060 }
43f66a6c 4061
0edd5b44
JG
4062 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4063 struct notif_frag_length *x = &notif->u.frag_len;
43f66a6c 4064
a613bffd
JK
4065 if (notif->size == sizeof(*x))
4066 IPW_ERROR("Frag length: %d\n",
4067 le16_to_cpu(x->frag_length));
4068 else
0edd5b44
JG
4069 IPW_ERROR("Frag length of wrong size %d "
4070 "(should be %zd)\n",
4071 notif->size, sizeof(*x));
0edd5b44 4072 break;
43f66a6c 4073 }
43f66a6c 4074
0edd5b44
JG
4075 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4076 struct notif_link_deterioration *x =
4077 &notif->u.link_deterioration;
4078 if (notif->size == sizeof(*x)) {
4079 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4080 "link deterioration: '%s' " MAC_FMT
4081 " \n", escape_essid(priv->essid,
4082 priv->essid_len),
4083 MAC_ARG(priv->bssid));
4084 memcpy(&priv->last_link_deterioration, x,
4085 sizeof(*x));
4086 } else {
4087 IPW_ERROR("Link Deterioration of wrong size %d "
4088 "(should be %zd)\n",
4089 notif->size, sizeof(*x));
4090 }
43f66a6c
JK
4091 break;
4092 }
4093
0edd5b44
JG
4094 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4095 IPW_ERROR("Dino config\n");
4096 if (priv->hcmd
a613bffd 4097 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
0edd5b44 4098 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
a613bffd 4099
0edd5b44
JG
4100 break;
4101 }
43f66a6c 4102
0edd5b44
JG
4103 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4104 struct notif_beacon_state *x = &notif->u.beacon_state;
4105 if (notif->size != sizeof(*x)) {
4106 IPW_ERROR
4107 ("Beacon state of wrong size %d (should "
4108 "be %zd)\n", notif->size, sizeof(*x));
4109 break;
43f66a6c
JK
4110 }
4111
a613bffd
JK
4112 if (le32_to_cpu(x->state) ==
4113 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4114 ipw_handle_missed_beacon(priv,
4115 le32_to_cpu(x->
4116 number));
43f66a6c 4117
0edd5b44
JG
4118 break;
4119 }
43f66a6c 4120
0edd5b44
JG
4121 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4122 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4123 if (notif->size == sizeof(*x)) {
4124 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4125 "0x%02x station %d\n",
4126 x->key_state, x->security_type,
4127 x->station_index);
4128 break;
4129 }
43f66a6c 4130
0edd5b44
JG
4131 IPW_ERROR
4132 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4133 notif->size, sizeof(*x));
43f66a6c 4134 break;
bf79451e 4135 }
43f66a6c 4136
0edd5b44
JG
4137 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4138 struct notif_calibration *x = &notif->u.calibration;
43f66a6c 4139
0edd5b44
JG
4140 if (notif->size == sizeof(*x)) {
4141 memcpy(&priv->calib, x, sizeof(*x));
4142 IPW_DEBUG_INFO("TODO: Calibration\n");
4143 break;
4144 }
43f66a6c 4145
0edd5b44
JG
4146 IPW_ERROR
4147 ("Calibration of wrong size %d (should be %zd)\n",
4148 notif->size, sizeof(*x));
43f66a6c 4149 break;
bf79451e
JG
4150 }
4151
0edd5b44
JG
4152 case HOST_NOTIFICATION_NOISE_STATS:{
4153 if (notif->size == sizeof(u32)) {
4154 priv->last_noise =
a613bffd
JK
4155 (u8) (le32_to_cpu(notif->u.noise.value) &
4156 0xff);
0edd5b44
JG
4157 average_add(&priv->average_noise,
4158 priv->last_noise);
4159 break;
4160 }
43f66a6c 4161
0edd5b44
JG
4162 IPW_ERROR
4163 ("Noise stat is wrong size %d (should be %zd)\n",
4164 notif->size, sizeof(u32));
43f66a6c
JK
4165 break;
4166 }
4167
43f66a6c
JK
4168 default:
4169 IPW_ERROR("Unknown notification: "
4170 "subtype=%d,flags=0x%2x,size=%d\n",
4171 notif->subtype, notif->flags, notif->size);
4172 }
4173}
4174
4175/**
 4176 * Destroys all DMA structures and initialises them again
bf79451e 4177 *
43f66a6c
JK
4178 * @param priv
4179 * @return error code
4180 */
4181static int ipw_queue_reset(struct ipw_priv *priv)
4182{
4183 int rc = 0;
4184 /** @todo customize queue sizes */
4185 int nTx = 64, nTxCmd = 8;
4186 ipw_tx_queue_free(priv);
4187 /* Tx CMD queue */
4188 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4189 CX2_TX_CMD_QUEUE_READ_INDEX,
4190 CX2_TX_CMD_QUEUE_WRITE_INDEX,
4191 CX2_TX_CMD_QUEUE_BD_BASE,
4192 CX2_TX_CMD_QUEUE_BD_SIZE);
4193 if (rc) {
4194 IPW_ERROR("Tx Cmd queue init failed\n");
4195 goto error;
4196 }
4197 /* Tx queue(s) */
4198 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4199 CX2_TX_QUEUE_0_READ_INDEX,
4200 CX2_TX_QUEUE_0_WRITE_INDEX,
0edd5b44 4201 CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
43f66a6c
JK
4202 if (rc) {
4203 IPW_ERROR("Tx 0 queue init failed\n");
4204 goto error;
4205 }
4206 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4207 CX2_TX_QUEUE_1_READ_INDEX,
4208 CX2_TX_QUEUE_1_WRITE_INDEX,
0edd5b44 4209 CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
43f66a6c
JK
4210 if (rc) {
4211 IPW_ERROR("Tx 1 queue init failed\n");
4212 goto error;
4213 }
4214 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4215 CX2_TX_QUEUE_2_READ_INDEX,
4216 CX2_TX_QUEUE_2_WRITE_INDEX,
0edd5b44 4217 CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
43f66a6c
JK
4218 if (rc) {
4219 IPW_ERROR("Tx 2 queue init failed\n");
4220 goto error;
4221 }
4222 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4223 CX2_TX_QUEUE_3_READ_INDEX,
4224 CX2_TX_QUEUE_3_WRITE_INDEX,
0edd5b44 4225 CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
43f66a6c
JK
4226 if (rc) {
4227 IPW_ERROR("Tx 3 queue init failed\n");
4228 goto error;
4229 }
4230 /* statistics */
4231 priv->rx_bufs_min = 0;
4232 priv->rx_pend_max = 0;
4233 return rc;
4234
0edd5b44 4235 error:
43f66a6c
JK
4236 ipw_tx_queue_free(priv);
4237 return rc;
4238}
4239
4240/**
 4241 * Reclaim Tx queue entries no longer used by the NIC.
bf79451e 4242 *
43f66a6c
JK
 4243 * When FW advances the 'R' index, all entries between the old and
 4244 * new 'R' index need to be reclaimed. As a result, some free space
4245 * forms. If there is enough free space (> low mark), wake Tx queue.
bf79451e 4246 *
43f66a6c
JK
4247 * @note Need to protect against garbage in 'R' index
4248 * @param priv
4249 * @param txq
4250 * @param qindex
 4251 * @return Number of used entries remaining in the queue
4252 */
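/* Example of the wrap-around bookkeeping in this routine (illustrative
 * numbers only): with q->n_bd = 64, q->last_used = 60 and
 * q->first_empty = 5, 'used' evaluates to 5 - 60 = -55 and is then
 * corrected to -55 + 64 = 9 entries still in use. */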
bf79451e 4253static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
43f66a6c
JK
4254 struct clx2_tx_queue *txq, int qindex)
4255{
4256 u32 hw_tail;
4257 int used;
4258 struct clx2_queue *q = &txq->q;
4259
4260 hw_tail = ipw_read32(priv, q->reg_r);
4261 if (hw_tail >= q->n_bd) {
4262 IPW_ERROR
0edd5b44
JG
4263 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4264 hw_tail, q->n_bd);
43f66a6c
JK
4265 goto done;
4266 }
4267 for (; q->last_used != hw_tail;
4268 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4269 ipw_queue_tx_free_tfd(priv, txq);
4270 priv->tx_packets++;
4271 }
0edd5b44 4272 done:
a613bffd 4273 if (ipw_queue_space(q) > q->low_mark && qindex >= 0)
43f66a6c 4274 __maybe_wake_tx(priv);
43f66a6c
JK
4275 used = q->first_empty - q->last_used;
4276 if (used < 0)
4277 used += q->n_bd;
4278
4279 return used;
4280}
4281
4282static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4283 int len, int sync)
4284{
4285 struct clx2_tx_queue *txq = &priv->txq_cmd;
4286 struct clx2_queue *q = &txq->q;
4287 struct tfd_frame *tfd;
4288
4289 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4290 IPW_ERROR("No space for Tx\n");
4291 return -EBUSY;
4292 }
4293
4294 tfd = &txq->bd[q->first_empty];
4295 txq->txb[q->first_empty] = NULL;
4296
4297 memset(tfd, 0, sizeof(*tfd));
4298 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4299 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4300 priv->hcmd_seq++;
4301 tfd->u.cmd.index = hcmd;
4302 tfd->u.cmd.length = len;
4303 memcpy(tfd->u.cmd.payload, buf, len);
4304 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4305 ipw_write32(priv, q->reg_w, q->first_empty);
4306 _ipw_read32(priv, 0x90);
4307
4308 return 0;
4309}
4310
bf79451e 4311/*
43f66a6c
JK
4312 * Rx theory of operation
4313 *
4314 * The host allocates 32 DMA target addresses and passes the host address
4315 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4316 * 0 to 31
4317 *
4318 * Rx Queue Indexes
4319 * The host/firmware share two index registers for managing the Rx buffers.
4320 *
bf79451e
JG
4321 * The READ index maps to the first position that the firmware may be writing
4322 * to -- the driver can read up to (but not including) this position and get
4323 * good data.
43f66a6c
JK
4324 * The READ index is managed by the firmware once the card is enabled.
4325 *
4326 * The WRITE index maps to the last position the driver has read from -- the
4327 * position preceding WRITE is the last slot the firmware can place a packet.
4328 *
4329 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
bf79451e 4330 * WRITE = READ.
43f66a6c 4331 *
bf79451e 4332 * During initialization the host sets up the READ queue position to the first
43f66a6c
JK
4333 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4334 *
4335 * When the firmware places a packet in a buffer it will advance the READ index
4336 * and fire the RX interrupt. The driver can then query the READ index and
4337 * process as many packets as possible, moving the WRITE index forward as it
4338 * resets the Rx queue buffers with new memory.
bf79451e 4339 *
43f66a6c 4340 * The management in the driver is as follows:
bf79451e 4341 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
43f66a6c 4342 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
bf79451e 4343 * to replenish the ipw->rxq->rx_free.
43f66a6c
JK
4344 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4345 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4346 * 'processed' and 'read' driver indexes as well)
4347 * + A received packet is processed and handed to the kernel network stack,
4348 * detached from the ipw->rxq. The driver 'processed' index is updated.
4349 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
bf79451e
JG
4350 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4351 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
43f66a6c
JK
4352 * were enough free buffers and RX_STALLED is set it is cleared.
4353 *
4354 *
4355 * Driver sequence:
4356 *
bf79451e 4357 * ipw_rx_queue_alloc() Allocates rx_free
43f66a6c
JK
4358 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4359 * ipw_rx_queue_restock
4360 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4361 * queue, updates firmware pointers, and updates
4362 * the WRITE index. If insufficient rx_free buffers
4363 * are available, schedules ipw_rx_queue_replenish
4364 *
4365 * -- enable interrupts --
4366 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
bf79451e 4367 * READ INDEX, detaching the SKB from the pool.
43f66a6c
JK
4368 * Moves the packet buffer from queue to rx_used.
4369 * Calls ipw_rx_queue_restock to refill any empty
4370 * slots.
4371 * ...
4372 *
4373 */
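/*
 * A minimal sketch of the index convention described above; the helper
 * below is hypothetical and not part of the driver, it only restates the
 * "empty if WRITE == READ - 1, full if WRITE == READ" rule:
 *
 *	static inline u32 ipw_rx_slots_readable(u32 read, u32 write, u32 n)
 *	{
 *		return (read + n - write - 1) % n;
 *	}
 *
 * With n = RX_QUEUE_SIZE this yields 0 when WRITE == READ - 1 and
 * n - 1 when WRITE == READ (one slot always stays unused).
 */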
4374
bf79451e 4375/*
43f66a6c
JK
4376 * If there are slots in the RX queue that need to be restocked,
4377 * and we have free pre-allocated buffers, fill the ranks as much
4378 * as we can pulling from rx_free.
4379 *
4380 * This moves the 'write' index forward to catch up with 'processed', and
4381 * also updates the memory address in the firmware to reference the new
4382 * target buffer.
4383 */
4384static void ipw_rx_queue_restock(struct ipw_priv *priv)
4385{
4386 struct ipw_rx_queue *rxq = priv->rxq;
4387 struct list_head *element;
4388 struct ipw_rx_mem_buffer *rxb;
4389 unsigned long flags;
4390 int write;
4391
4392 spin_lock_irqsave(&rxq->lock, flags);
4393 write = rxq->write;
4394 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4395 element = rxq->rx_free.next;
4396 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4397 list_del(element);
4398
4399 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4400 rxb->dma_addr);
4401 rxq->queue[rxq->write] = rxb;
4402 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4403 rxq->free_count--;
4404 }
4405 spin_unlock_irqrestore(&rxq->lock, flags);
4406
bf79451e 4407 /* If the pre-allocated buffer pool is dropping low, schedule to
43f66a6c
JK
4408 * refill it */
4409 if (rxq->free_count <= RX_LOW_WATERMARK)
4410 queue_work(priv->workqueue, &priv->rx_replenish);
4411
4412 /* If we've added more space for the firmware to place data, tell it */
bf79451e 4413 if (write != rxq->write)
43f66a6c
JK
4414 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
4415}
4416
4417/*
 4418 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
bf79451e
JG
4419 * Also restock the Rx queue via ipw_rx_queue_restock.
4420 *
43f66a6c
JK
 4421 * This is called as a scheduled work item (except during initialization)
4422 */
4423static void ipw_rx_queue_replenish(void *data)
4424{
4425 struct ipw_priv *priv = data;
4426 struct ipw_rx_queue *rxq = priv->rxq;
4427 struct list_head *element;
4428 struct ipw_rx_mem_buffer *rxb;
4429 unsigned long flags;
4430
4431 spin_lock_irqsave(&rxq->lock, flags);
4432 while (!list_empty(&rxq->rx_used)) {
4433 element = rxq->rx_used.next;
4434 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4435 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
4436 if (!rxb->skb) {
4437 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4438 priv->net_dev->name);
4439 /* We don't reschedule replenish work here -- we will
4440 * call the restock method and if it still needs
4441 * more buffers it will schedule replenish */
4442 break;
4443 }
4444 list_del(element);
bf79451e 4445
43f66a6c 4446 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
0edd5b44
JG
4447 rxb->dma_addr =
4448 pci_map_single(priv->pci_dev, rxb->skb->data,
4449 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
bf79451e 4450
43f66a6c
JK
4451 list_add_tail(&rxb->list, &rxq->rx_free);
4452 rxq->free_count++;
4453 }
4454 spin_unlock_irqrestore(&rxq->lock, flags);
4455
4456 ipw_rx_queue_restock(priv);
4457}
4458
4459/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 4460 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
bf79451e 4461 * This free routine walks the list of POOL entries and if SKB is set to
43f66a6c
JK
4462 * non NULL it is unmapped and freed
4463 */
0edd5b44 4464static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
43f66a6c
JK
4465{
4466 int i;
4467
4468 if (!rxq)
4469 return;
bf79451e 4470
43f66a6c
JK
4471 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4472 if (rxq->pool[i].skb != NULL) {
4473 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
0edd5b44 4474 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
43f66a6c
JK
4475 dev_kfree_skb(rxq->pool[i].skb);
4476 }
4477 }
4478
4479 kfree(rxq);
4480}
4481
4482static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4483{
4484 struct ipw_rx_queue *rxq;
4485 int i;
4486
4487 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL);
ad18b0ea
PI
4488 if (unlikely(!rxq)) {
4489 IPW_ERROR("memory allocation failed\n");
4490 return NULL;
4491 }
43f66a6c
JK
4492 memset(rxq, 0, sizeof(*rxq));
4493 spin_lock_init(&rxq->lock);
4494 INIT_LIST_HEAD(&rxq->rx_free);
4495 INIT_LIST_HEAD(&rxq->rx_used);
4496
4497 /* Fill the rx_used queue with _all_ of the Rx buffers */
bf79451e 4498 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
43f66a6c
JK
4499 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4500
4501 /* Set us so that we have processed and used all buffers, but have
4502 * not restocked the Rx queue with fresh buffers */
4503 rxq->read = rxq->write = 0;
4504 rxq->processed = RX_QUEUE_SIZE - 1;
4505 rxq->free_count = 0;
4506
4507 return rxq;
4508}
4509
4510static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4511{
4512 rate &= ~IEEE80211_BASIC_RATE_MASK;
4513 if (ieee_mode == IEEE_A) {
4514 switch (rate) {
bf79451e
JG
4515 case IEEE80211_OFDM_RATE_6MB:
4516 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
0edd5b44 4517 1 : 0;
bf79451e
JG
4518 case IEEE80211_OFDM_RATE_9MB:
4519 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
0edd5b44 4520 1 : 0;
bf79451e 4521 case IEEE80211_OFDM_RATE_12MB:
0edd5b44
JG
4522 return priv->
4523 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
bf79451e 4524 case IEEE80211_OFDM_RATE_18MB:
0edd5b44
JG
4525 return priv->
4526 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
bf79451e 4527 case IEEE80211_OFDM_RATE_24MB:
0edd5b44
JG
4528 return priv->
4529 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
bf79451e 4530 case IEEE80211_OFDM_RATE_36MB:
0edd5b44
JG
4531 return priv->
4532 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
bf79451e 4533 case IEEE80211_OFDM_RATE_48MB:
0edd5b44
JG
4534 return priv->
4535 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
bf79451e 4536 case IEEE80211_OFDM_RATE_54MB:
0edd5b44
JG
4537 return priv->
4538 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
43f66a6c
JK
4539 default:
4540 return 0;
4541 }
4542 }
bf79451e 4543
43f66a6c
JK
4544 /* B and G mixed */
4545 switch (rate) {
bf79451e 4546 case IEEE80211_CCK_RATE_1MB:
43f66a6c 4547 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
bf79451e 4548 case IEEE80211_CCK_RATE_2MB:
43f66a6c 4549 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
bf79451e 4550 case IEEE80211_CCK_RATE_5MB:
43f66a6c 4551 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
bf79451e 4552 case IEEE80211_CCK_RATE_11MB:
43f66a6c
JK
4553 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4554 }
4555
4556 /* If we are limited to B modulations, bail at this point */
4557 if (ieee_mode == IEEE_B)
4558 return 0;
4559
4560 /* G */
4561 switch (rate) {
bf79451e 4562 case IEEE80211_OFDM_RATE_6MB:
43f66a6c 4563 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
bf79451e 4564 case IEEE80211_OFDM_RATE_9MB:
43f66a6c 4565 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
bf79451e 4566 case IEEE80211_OFDM_RATE_12MB:
43f66a6c 4567 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
bf79451e 4568 case IEEE80211_OFDM_RATE_18MB:
43f66a6c 4569 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
bf79451e 4570 case IEEE80211_OFDM_RATE_24MB:
43f66a6c 4571 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
bf79451e 4572 case IEEE80211_OFDM_RATE_36MB:
43f66a6c 4573 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
bf79451e 4574 case IEEE80211_OFDM_RATE_48MB:
43f66a6c 4575 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
bf79451e 4576 case IEEE80211_OFDM_RATE_54MB:
43f66a6c
JK
4577 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4578 }
4579
4580 return 0;
4581}
4582
bf79451e 4583static int ipw_compatible_rates(struct ipw_priv *priv,
43f66a6c
JK
4584 const struct ieee80211_network *network,
4585 struct ipw_supported_rates *rates)
4586{
4587 int num_rates, i;
4588
4589 memset(rates, 0, sizeof(*rates));
0edd5b44 4590 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
43f66a6c
JK
4591 rates->num_rates = 0;
4592 for (i = 0; i < num_rates; i++) {
a613bffd
JK
4593 if (!ipw_is_rate_in_mask(priv, network->mode,
4594 network->rates[i])) {
4595
ea2b26e0 4596 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
a613bffd
JK
4597 IPW_DEBUG_SCAN("Adding masked mandatory "
4598 "rate %02X\n",
4599 network->rates[i]);
4600 rates->supported_rates[rates->num_rates++] =
4601 network->rates[i];
4602 continue;
ea2b26e0
JK
4603 }
4604
43f66a6c
JK
4605 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4606 network->rates[i], priv->rates_mask);
4607 continue;
4608 }
bf79451e 4609
43f66a6c
JK
4610 rates->supported_rates[rates->num_rates++] = network->rates[i];
4611 }
4612
a613bffd
JK
4613 num_rates = min(network->rates_ex_len,
4614 (u8) (IPW_MAX_RATES - num_rates));
43f66a6c 4615 for (i = 0; i < num_rates; i++) {
a613bffd
JK
4616 if (!ipw_is_rate_in_mask(priv, network->mode,
4617 network->rates_ex[i])) {
ea2b26e0 4618 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
a613bffd
JK
4619 IPW_DEBUG_SCAN("Adding masked mandatory "
4620 "rate %02X\n",
4621 network->rates_ex[i]);
4622 rates->supported_rates[rates->num_rates++] =
4623 network->rates[i];
4624 continue;
ea2b26e0
JK
4625 }
4626
43f66a6c
JK
4627 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4628 network->rates_ex[i], priv->rates_mask);
4629 continue;
4630 }
bf79451e 4631
0edd5b44
JG
4632 rates->supported_rates[rates->num_rates++] =
4633 network->rates_ex[i];
43f66a6c
JK
4634 }
4635
ea2b26e0 4636 return 1;
43f66a6c
JK
4637}
4638
4639static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4640 const struct ipw_supported_rates *src)
4641{
4642 u8 i;
4643 for (i = 0; i < src->num_rates; i++)
4644 dest->supported_rates[i] = src->supported_rates[i];
4645 dest->num_rates = src->num_rates;
4646}
4647
4648/* TODO: Look at sniffed packets in the air to determine if the basic rate
4649 * mask should ever be used -- right now all callers to add the scan rates are
4650 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
4651static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
0edd5b44 4652 u8 modulation, u32 rate_mask)
43f66a6c 4653{
bf79451e 4654 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
0edd5b44 4655 IEEE80211_BASIC_RATE_MASK : 0;
bf79451e 4656
43f66a6c 4657 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
bf79451e 4658 rates->supported_rates[rates->num_rates++] =
0edd5b44 4659 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
43f66a6c
JK
4660
4661 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
bf79451e 4662 rates->supported_rates[rates->num_rates++] =
0edd5b44 4663 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
43f66a6c
JK
4664
4665 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
bf79451e 4666 rates->supported_rates[rates->num_rates++] = basic_mask |
0edd5b44 4667 IEEE80211_CCK_RATE_5MB;
43f66a6c
JK
4668
4669 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
bf79451e 4670 rates->supported_rates[rates->num_rates++] = basic_mask |
0edd5b44 4671 IEEE80211_CCK_RATE_11MB;
43f66a6c
JK
4672}
4673
4674static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
0edd5b44 4675 u8 modulation, u32 rate_mask)
43f66a6c 4676{
bf79451e 4677 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
0edd5b44 4678 IEEE80211_BASIC_RATE_MASK : 0;
43f66a6c
JK
4679
4680 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
bf79451e 4681 rates->supported_rates[rates->num_rates++] = basic_mask |
0edd5b44 4682 IEEE80211_OFDM_RATE_6MB;
43f66a6c
JK
4683
4684 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
bf79451e 4685 rates->supported_rates[rates->num_rates++] =
0edd5b44 4686 IEEE80211_OFDM_RATE_9MB;
43f66a6c
JK
4687
4688 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
bf79451e 4689 rates->supported_rates[rates->num_rates++] = basic_mask |
0edd5b44 4690 IEEE80211_OFDM_RATE_12MB;
43f66a6c
JK
4691
4692 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
bf79451e 4693 rates->supported_rates[rates->num_rates++] =
0edd5b44 4694 IEEE80211_OFDM_RATE_18MB;
43f66a6c
JK
4695
4696 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
bf79451e 4697 rates->supported_rates[rates->num_rates++] = basic_mask |
0edd5b44 4698 IEEE80211_OFDM_RATE_24MB;
43f66a6c
JK
4699
4700 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
bf79451e 4701 rates->supported_rates[rates->num_rates++] =
0edd5b44 4702 IEEE80211_OFDM_RATE_36MB;
43f66a6c
JK
4703
4704 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
bf79451e 4705 rates->supported_rates[rates->num_rates++] =
0edd5b44 4706 IEEE80211_OFDM_RATE_48MB;
43f66a6c
JK
4707
4708 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
bf79451e 4709 rates->supported_rates[rates->num_rates++] =
0edd5b44 4710 IEEE80211_OFDM_RATE_54MB;
43f66a6c
JK
4711}
4712
4713struct ipw_network_match {
4714 struct ieee80211_network *network;
4715 struct ipw_supported_rates rates;
4716};
4717
0edd5b44
JG
4718static int ipw_best_network(struct ipw_priv *priv,
4719 struct ipw_network_match *match,
4720 struct ieee80211_network *network, int roaming)
43f66a6c
JK
4721{
4722 struct ipw_supported_rates rates;
4723
4724 /* Verify that this network's capability is compatible with the
4725 * current mode (AdHoc or Infrastructure) */
4726 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
2474385e 4727 !(network->capability & WLAN_CAPABILITY_ESS)) ||
43f66a6c
JK
4728 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4729 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4730 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
bf79451e 4731 "capability mismatch.\n",
43f66a6c
JK
4732 escape_essid(network->ssid, network->ssid_len),
4733 MAC_ARG(network->bssid));
4734 return 0;
4735 }
4736
4737 /* If we do not have an ESSID for this AP, we can not associate with
4738 * it */
4739 if (network->flags & NETWORK_EMPTY_ESSID) {
4740 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4741 "because of hidden ESSID.\n",
4742 escape_essid(network->ssid, network->ssid_len),
4743 MAC_ARG(network->bssid));
4744 return 0;
4745 }
bf79451e 4746
43f66a6c
JK
4747 if (unlikely(roaming)) {
 4748		/* If we are roaming, then check whether this is a valid
 4749		 * network to try to roam to */
4750 if ((network->ssid_len != match->network->ssid_len) ||
bf79451e 4751 memcmp(network->ssid, match->network->ssid,
43f66a6c
JK
4752 network->ssid_len)) {
4753 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
4754 "because of non-network ESSID.\n",
bf79451e 4755 escape_essid(network->ssid,
43f66a6c
JK
4756 network->ssid_len),
4757 MAC_ARG(network->bssid));
4758 return 0;
4759 }
4760 } else {
bf79451e
JG
4761 /* If an ESSID has been configured then compare the broadcast
4762 * ESSID to ours */
4763 if ((priv->config & CFG_STATIC_ESSID) &&
43f66a6c 4764 ((network->ssid_len != priv->essid_len) ||
bf79451e 4765 memcmp(network->ssid, priv->essid,
43f66a6c
JK
4766 min(network->ssid_len, priv->essid_len)))) {
4767 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
0edd5b44
JG
4768 strncpy(escaped,
4769 escape_essid(network->ssid, network->ssid_len),
43f66a6c
JK
4770 sizeof(escaped));
4771 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
bf79451e 4772 "because of ESSID mismatch: '%s'.\n",
43f66a6c 4773 escaped, MAC_ARG(network->bssid),
0edd5b44
JG
4774 escape_essid(priv->essid,
4775 priv->essid_len));
43f66a6c
JK
4776 return 0;
4777 }
4778 }
4779
4780 /* If the old network rate is better than this one, don't bother
4781 * testing everything else. */
0edd5b44 4782 if (match->network && match->network->stats.rssi > network->stats.rssi) {
43f66a6c 4783 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
bf79451e
JG
4784 strncpy(escaped,
4785 escape_essid(network->ssid, network->ssid_len),
43f66a6c
JK
4786 sizeof(escaped));
4787 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4788 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4789 escaped, MAC_ARG(network->bssid),
4790 escape_essid(match->network->ssid,
4791 match->network->ssid_len),
4792 MAC_ARG(match->network->bssid));
4793 return 0;
4794 }
bf79451e 4795
43f66a6c
JK
4796 /* If this network has already had an association attempt within the
4797 * last 3 seconds, do not try and associate again... */
4798 if (network->last_associate &&
ea2b26e0 4799 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
43f66a6c
JK
4800 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4801 "because of storming (%lu since last "
4802 "assoc attempt).\n",
4803 escape_essid(network->ssid, network->ssid_len),
4804 MAC_ARG(network->bssid),
4805 (jiffies - network->last_associate) / HZ);
4806 return 0;
4807 }
4808
4809 /* Now go through and see if the requested network is valid... */
bf79451e 4810 if (priv->ieee->scan_age != 0 &&
ea2b26e0 4811 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
43f66a6c
JK
4812 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4813 "because of age: %lums.\n",
4814 escape_essid(network->ssid, network->ssid_len),
4815 MAC_ARG(network->bssid),
4816 (jiffies - network->last_scanned) / (HZ / 100));
4817 return 0;
bf79451e 4818 }
43f66a6c 4819
bf79451e 4820 if ((priv->config & CFG_STATIC_CHANNEL) &&
43f66a6c
JK
4821 (network->channel != priv->channel)) {
4822 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4823 "because of channel mismatch: %d != %d.\n",
4824 escape_essid(network->ssid, network->ssid_len),
4825 MAC_ARG(network->bssid),
4826 network->channel, priv->channel);
4827 return 0;
4828 }
bf79451e 4829
43f66a6c 4830	/* Verify privacy compatibility */
bf79451e 4831 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
43f66a6c
JK
4832 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4833 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4834 "because of privacy mismatch: %s != %s.\n",
4835 escape_essid(network->ssid, network->ssid_len),
4836 MAC_ARG(network->bssid),
bf79451e 4837 priv->capability & CAP_PRIVACY_ON ? "on" :
43f66a6c 4838 "off",
bf79451e 4839 network->capability &
0edd5b44 4840 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
43f66a6c
JK
4841 return 0;
4842 }
bf79451e
JG
4843
4844 if ((priv->config & CFG_STATIC_BSSID) &&
43f66a6c
JK
4845 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4846 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4847 "because of BSSID mismatch: " MAC_FMT ".\n",
4848 escape_essid(network->ssid, network->ssid_len),
0edd5b44 4849 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
43f66a6c
JK
4850 return 0;
4851 }
bf79451e 4852
43f66a6c
JK
4853 /* Filter out any incompatible freq / mode combinations */
4854 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4855 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4856 "because of invalid frequency/mode "
4857 "combination.\n",
4858 escape_essid(network->ssid, network->ssid_len),
4859 MAC_ARG(network->bssid));
4860 return 0;
4861 }
bf79451e 4862
ea2b26e0
JK
4863 /* Ensure that the rates supported by the driver are compatible with
4864 * this AP, including verification of basic rates (mandatory) */
4865 if (!ipw_compatible_rates(priv, network, &rates)) {
4866 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4867 "because configured rate mask excludes "
4868 "AP mandatory rate.\n",
4869 escape_essid(network->ssid, network->ssid_len),
4870 MAC_ARG(network->bssid));
4871 return 0;
4872 }
4873
43f66a6c
JK
4874 if (rates.num_rates == 0) {
4875 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4876 "because of no compatible rates.\n",
4877 escape_essid(network->ssid, network->ssid_len),
4878 MAC_ARG(network->bssid));
4879 return 0;
4880 }
bf79451e 4881
43f66a6c
JK
 4882	/* TODO: Perform any further minimal comparative tests. We do not
4883 * want to put too much policy logic here; intelligent scan selection
4884 * should occur within a generic IEEE 802.11 user space tool. */
4885
4886 /* Set up 'new' AP to this network */
4887 ipw_copy_rates(&match->rates, &rates);
4888 match->network = network;
4889
4890 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4891 escape_essid(network->ssid, network->ssid_len),
4892 MAC_ARG(network->bssid));
4893
4894 return 1;
4895}
4896
bf79451e 4897static void ipw_adhoc_create(struct ipw_priv *priv,
0edd5b44 4898 struct ieee80211_network *network)
43f66a6c
JK
4899{
4900 /*
4901 * For the purposes of scanning, we can set our wireless mode
4902 * to trigger scans across combinations of bands, but when it
 4903	 * comes to creating a new ad-hoc network, we have to tell the FW
4904 * exactly which band to use.
4905 *
bf79451e 4906 * We also have the possibility of an invalid channel for the
43f66a6c
JK
 4907	 * chosen band. Attempting to create a new ad-hoc network
4908 * with an invalid channel for wireless mode will trigger a
4909 * FW fatal error.
4910 */
4911 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
a613bffd 4912 if (!network->mode) {
43f66a6c
JK
4913 IPW_WARNING("Overriding invalid channel\n");
4914 if (priv->ieee->mode & IEEE_A) {
4915 network->mode = IEEE_A;
4916 priv->channel = band_a_active_channel[0];
4917 } else if (priv->ieee->mode & IEEE_G) {
4918 network->mode = IEEE_G;
4919 priv->channel = band_b_active_channel[0];
4920 } else {
4921 network->mode = IEEE_B;
4922 priv->channel = band_b_active_channel[0];
4923 }
4924 }
4925
4926 network->channel = priv->channel;
4927 priv->config |= CFG_ADHOC_PERSIST;
4928 ipw_create_bssid(priv, network->bssid);
4929 network->ssid_len = priv->essid_len;
4930 memcpy(network->ssid, priv->essid, priv->essid_len);
4931 memset(&network->stats, 0, sizeof(network->stats));
4932 network->capability = WLAN_CAPABILITY_IBSS;
ea2b26e0
JK
4933 if (!(priv->config & CFG_PREAMBLE_LONG))
4934 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
43f66a6c
JK
4935 if (priv->capability & CAP_PRIVACY_ON)
4936 network->capability |= WLAN_CAPABILITY_PRIVACY;
4937 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
0edd5b44 4938 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
43f66a6c 4939 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
bf79451e 4940 memcpy(network->rates_ex,
43f66a6c
JK
4941 &priv->rates.supported_rates[network->rates_len],
4942 network->rates_ex_len);
4943 network->last_scanned = 0;
4944 network->flags = 0;
4945 network->last_associate = 0;
4946 network->time_stamp[0] = 0;
4947 network->time_stamp[1] = 0;
0edd5b44
JG
4948 network->beacon_interval = 100; /* Default */
4949 network->listen_interval = 10; /* Default */
4950 network->atim_window = 0; /* Default */
43f66a6c
JK
4951 network->wpa_ie_len = 0;
4952 network->rsn_ie_len = 0;
43f66a6c
JK
4953}
4954
4955static void ipw_send_wep_keys(struct ipw_priv *priv)
4956{
4957 struct ipw_wep_key *key;
4958 int i;
4959 struct host_cmd cmd = {
4960 .cmd = IPW_CMD_WEP_KEY,
4961 .len = sizeof(*key)
4962 };
4963
4964 key = (struct ipw_wep_key *)&cmd.param;
4965 key->cmd_id = DINO_CMD_WEP_KEY;
4966 key->seq_num = 0;
4967
bf79451e 4968 for (i = 0; i < 4; i++) {
43f66a6c 4969 key->key_index = i;
a613bffd 4970 if (!(priv->sec.flags & (1 << i)))
43f66a6c 4971 key->key_size = 0;
a613bffd 4972 else {
43f66a6c
JK
4973 key->key_size = priv->sec.key_sizes[i];
4974 memcpy(key->key, priv->sec.keys[i], key->key_size);
4975 }
4976
4977 if (ipw_send_cmd(priv, &cmd)) {
4978 IPW_ERROR("failed to send WEP_KEY command\n");
4979 return;
4980 }
bf79451e 4981 }
43f66a6c
JK
4982}
4983
4984static void ipw_adhoc_check(void *data)
4985{
4986 struct ipw_priv *priv = data;
bf79451e 4987
43f66a6c
JK
4988 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4989 !(priv->config & CFG_ADHOC_PERSIST)) {
4990 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4991 ipw_remove_current_network(priv);
4992 ipw_disassociate(priv);
4993 return;
4994 }
4995
bf79451e 4996 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
43f66a6c
JK
4997 priv->assoc_request.beacon_interval);
4998}
4999
5000#ifdef CONFIG_IPW_DEBUG
5001static void ipw_debug_config(struct ipw_priv *priv)
5002{
5003 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5004 "[CFG 0x%08X]\n", priv->config);
5005 if (priv->config & CFG_STATIC_CHANNEL)
0edd5b44 5006 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
43f66a6c
JK
5007 else
5008 IPW_DEBUG_INFO("Channel unlocked.\n");
5009 if (priv->config & CFG_STATIC_ESSID)
bf79451e 5010 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
0edd5b44 5011 escape_essid(priv->essid, priv->essid_len));
43f66a6c
JK
5012 else
5013 IPW_DEBUG_INFO("ESSID unlocked.\n");
5014 if (priv->config & CFG_STATIC_BSSID)
ea2b26e0
JK
5015 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5016 MAC_ARG(priv->bssid));
43f66a6c
JK
5017 else
5018 IPW_DEBUG_INFO("BSSID unlocked.\n");
5019 if (priv->capability & CAP_PRIVACY_ON)
5020 IPW_DEBUG_INFO("PRIVACY on\n");
5021 else
5022 IPW_DEBUG_INFO("PRIVACY off\n");
5023 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5024}
5025#else
8d45ff7d 5026#define ipw_debug_config(x) do {} while (0)
43f66a6c
JK
5027#endif
5028
5029static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
5030 struct ieee80211_network *network)
5031{
5032 /* TODO: Verify that this works... */
5033 struct ipw_fixed_rate fr = {
5034 .tx_rates = priv->rates_mask
5035 };
5036 u32 reg;
5037 u16 mask = 0;
5038
bf79451e 5039 /* Identify 'current FW band' and match it with the fixed
43f66a6c 5040 * Tx rates */
bf79451e 5041
43f66a6c 5042 switch (priv->ieee->freq_band) {
0edd5b44 5043 case IEEE80211_52GHZ_BAND: /* A only */
43f66a6c
JK
5044 /* IEEE_A */
5045 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5046 /* Invalid fixed rate mask */
ea2b26e0
JK
5047 IPW_DEBUG_WX
5048 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
43f66a6c
JK
5049 fr.tx_rates = 0;
5050 break;
5051 }
bf79451e 5052
43f66a6c
JK
5053 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5054 break;
5055
0edd5b44 5056 default: /* 2.4Ghz or Mixed */
43f66a6c
JK
5057 /* IEEE_B */
5058 if (network->mode == IEEE_B) {
5059 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5060 /* Invalid fixed rate mask */
ea2b26e0
JK
5061 IPW_DEBUG_WX
5062 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
43f66a6c
JK
5063 fr.tx_rates = 0;
5064 }
5065 break;
bf79451e 5066 }
43f66a6c
JK
5067
5068 /* IEEE_G */
5069 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5070 IEEE80211_OFDM_RATES_MASK)) {
5071 /* Invalid fixed rate mask */
ea2b26e0
JK
5072 IPW_DEBUG_WX
5073 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
43f66a6c
JK
5074 fr.tx_rates = 0;
5075 break;
5076 }
5077
5078 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5079 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5080 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5081 }
bf79451e 5082
43f66a6c
JK
5083 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5084 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5085 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5086 }
bf79451e 5087
43f66a6c
JK
5088 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5089 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5090 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5091 }
bf79451e 5092
43f66a6c
JK
5093 fr.tx_rates |= mask;
5094 break;
5095 }
5096
5097 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
0edd5b44 5098 ipw_write_reg32(priv, reg, *(u32 *) & fr);
43f66a6c
JK
5099}
5100
ea2b26e0 5101static void ipw_abort_scan(struct ipw_priv *priv)
43f66a6c
JK
5102{
5103 int err;
5104
ea2b26e0
JK
5105 if (priv->status & STATUS_SCAN_ABORTING) {
5106 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5107 return;
5108 }
5109 priv->status |= STATUS_SCAN_ABORTING;
43f66a6c 5110
ea2b26e0
JK
5111 err = ipw_send_scan_abort(priv);
5112 if (err)
5113 IPW_DEBUG_HC("Request to abort scan failed.\n");
5114}
5115
5116static int ipw_request_scan(struct ipw_priv *priv)
5117{
5118 struct ipw_scan_request_ext scan;
5119 int channel_index = 0;
5120 int i, err, scan_type;
5121
5122 if (priv->status & STATUS_EXIT_PENDING) {
5123 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5124 priv->status |= STATUS_SCAN_PENDING;
5125 return 0;
43f66a6c
JK
5126 }
5127
ea2b26e0 5128 if (priv->status & STATUS_SCANNING) {
a613bffd
JK
5129 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
5130// IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
ea2b26e0 5131 priv->status |= STATUS_SCAN_PENDING;
a613bffd 5132// ipw_abort_scan(priv);
ea2b26e0
JK
5133 return 0;
5134 }
43f66a6c 5135
ea2b26e0
JK
5136 if (priv->status & STATUS_SCAN_ABORTING) {
5137 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5138 priv->status |= STATUS_SCAN_PENDING;
5139 return 0;
43f66a6c
JK
5140 }
5141
ea2b26e0
JK
5142 if (priv->status & STATUS_RF_KILL_MASK) {
5143 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5144 priv->status |= STATUS_SCAN_PENDING;
5145 return 0;
5146 }
43f66a6c 5147
ea2b26e0 5148 memset(&scan, 0, sizeof(scan));
43f66a6c 5149
a613bffd
JK
5150 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = cpu_to_le16(20);
5151 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
5152 cpu_to_le16(20);
5153 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(20);
43f66a6c 5154
a613bffd 5155 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
43f66a6c 5156
ea2b26e0
JK
5157#ifdef CONFIG_IPW_MONITOR
5158 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
5159 u8 band = 0, channel = priv->channel;
43f66a6c 5160
ea2b26e0
JK
5161 if (is_valid_channel(IEEE_A, channel))
5162 band = (u8) (IPW_A_MODE << 6) | 1;
5163
5164 if (is_valid_channel(IEEE_B | IEEE_G, channel))
5165 band = (u8) (IPW_B_MODE << 6) | 1;
5166
5167 if (band == 0) {
5168 band = (u8) (IPW_B_MODE << 6) | 1;
5169 channel = 9;
5170 }
5171
5172 scan.channels_list[channel_index++] = band;
5173 scan.channels_list[channel_index] = channel;
5174 ipw_set_scan_type(&scan, channel_index,
5175 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
5176
a613bffd
JK
5177 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
5178 cpu_to_le16(2000);
43f66a6c 5179 } else {
ea2b26e0
JK
5180#endif /* CONFIG_IPW_MONITOR */
5181 /* If we are roaming, then make this a directed scan for the current
5182 * network. Otherwise, ensure that every other scan is a fast
5183 * channel hop scan */
5184 if ((priv->status & STATUS_ROAMING)
5185 || (!(priv->status & STATUS_ASSOCIATED)
5186 && (priv->config & CFG_STATIC_ESSID)
a613bffd 5187 && (le32_to_cpu(scan.full_scan_index) % 2))) {
ea2b26e0
JK
5188 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5189 if (err) {
5190 IPW_DEBUG_HC
5191 ("Attempt to send SSID command failed.\n");
5192 return err;
5193 }
43f66a6c 5194
ea2b26e0
JK
5195 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
5196 } else {
5197 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
5198 }
bf79451e 5199
ea2b26e0
JK
5200 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5201 int start = channel_index;
5202 for (i = 0; i < MAX_A_CHANNELS; i++) {
5203 if (band_a_active_channel[i] == 0)
5204 break;
5205 if ((priv->status & STATUS_ASSOCIATED) &&
5206 band_a_active_channel[i] == priv->channel)
5207 continue;
5208 channel_index++;
5209 scan.channels_list[channel_index] =
5210 band_a_active_channel[i];
5211 ipw_set_scan_type(&scan, channel_index,
5212 scan_type);
5213 }
43f66a6c 5214
ea2b26e0
JK
5215 if (start != channel_index) {
5216 scan.channels_list[start] =
5217 (u8) (IPW_A_MODE << 6) | (channel_index -
5218 start);
5219 channel_index++;
5220 }
5221 }
bf79451e 5222
ea2b26e0
JK
5223 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5224 int start = channel_index;
5225 for (i = 0; i < MAX_B_CHANNELS; i++) {
5226 if (band_b_active_channel[i] == 0)
5227 break;
5228 if ((priv->status & STATUS_ASSOCIATED) &&
5229 band_b_active_channel[i] == priv->channel)
5230 continue;
5231 channel_index++;
5232 scan.channels_list[channel_index] =
5233 band_b_active_channel[i];
5234 ipw_set_scan_type(&scan, channel_index,
5235 scan_type);
5236 }
5237
5238 if (start != channel_index) {
5239 scan.channels_list[start] =
5240 (u8) (IPW_B_MODE << 6) | (channel_index -
5241 start);
5242 }
5243 }
5244#ifdef CONFIG_IPW_MONITOR
43f66a6c 5245 }
ea2b26e0 5246#endif
bf79451e 5247
ea2b26e0 5248 err = ipw_send_scan_request_ext(priv, &scan);
43f66a6c 5249 if (err) {
ea2b26e0
JK
5250 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
5251 return -EIO;
43f66a6c
JK
5252 }
5253
ea2b26e0
JK
5254 priv->status |= STATUS_SCANNING;
5255 priv->status &= ~STATUS_SCAN_PENDING;
43f66a6c 5256
ea2b26e0
JK
5257 return 0;
5258}
43f66a6c 5259
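/*
 * A sketch, not driver code: the channels_list that ipw_request_scan()
 * builds above is a sequence of per-band blocks.  Each block opens with
 * a header byte carrying the IEEE mode (IPW_A_MODE or IPW_B_MODE) in
 * its top two bits and the number of channel bytes that follow in its
 * low six bits.  A hypothetical helper appending one such block;
 * everything except the IPW_*_MODE constants is made up here.
 */
static int example_append_band_block(u8 *list, int index, u8 ieee_mode,
				     const u8 *channels, u8 count)
{
	int i;

	list[index++] = (u8) (ieee_mode << 6) | count;	/* band header */
	for (i = 0; i < count; i++)
		list[index++] = channels[i];	/* channel numbers */
	return index;	/* next free slot in channels_list */
}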
ea2b26e0
JK
5260/* Support for wpa_supplicant. Will be replaced with WEXT once
5261 * they get WPA support. */
ea2b26e0
JK
5262
5263/* following definitions must match definitions in driver_ipw.c */
5264
5265#define IPW_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
5266
5267#define IPW_CMD_SET_WPA_PARAM 1
5268#define IPW_CMD_SET_WPA_IE 2
5269#define IPW_CMD_SET_ENCRYPTION 3
5270#define IPW_CMD_MLME 4
5271
5272#define IPW_PARAM_WPA_ENABLED 1
5273#define IPW_PARAM_TKIP_COUNTERMEASURES 2
5274#define IPW_PARAM_DROP_UNENCRYPTED 3
5275#define IPW_PARAM_PRIVACY_INVOKED 4
5276#define IPW_PARAM_AUTH_ALGS 5
5277#define IPW_PARAM_IEEE_802_1X 6
5278
5279#define IPW_MLME_STA_DEAUTH 1
5280#define IPW_MLME_STA_DISASSOC 2
5281
5282#define IPW_CRYPT_ERR_UNKNOWN_ALG 2
5283#define IPW_CRYPT_ERR_UNKNOWN_ADDR 3
5284#define IPW_CRYPT_ERR_CRYPT_INIT_FAILED 4
5285#define IPW_CRYPT_ERR_KEY_SET_FAILED 5
5286#define IPW_CRYPT_ERR_TX_KEY_SET_FAILED 6
5287#define IPW_CRYPT_ERR_CARD_CONF_FAILED 7
5288
5289#define IPW_CRYPT_ALG_NAME_LEN 16
5290
5291struct ipw_param {
5292 u32 cmd;
5293 u8 sta_addr[ETH_ALEN];
5294 union {
5295 struct {
5296 u8 name;
5297 u32 value;
5298 } wpa_param;
5299 struct {
5300 u32 len;
5301 u8 *data;
5302 } wpa_ie;
5303 struct {
5304 int command;
5305 int reason_code;
5306 } mlme;
5307 struct {
5308 u8 alg[IPW_CRYPT_ALG_NAME_LEN];
5309 u8 set_tx;
5310 u32 err;
5311 u8 idx;
5312 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
5313 u16 key_len;
5314 u8 key[0];
5315 } crypt;
5316
5317 } u;
5318};
5319
5320/* end of driver_ipw.c code */
5321
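/*
 * A minimal userspace sketch, not part of this file: how a tool such as
 * wpa_supplicant's driver_ipw.c is expected to reach the handlers below.
 * It assumes the IPW_* constants and struct ipw_param above are
 * duplicated verbatim on the userspace side (as the comment above
 * requires); the socket is any AF_INET datagram socket, and the helper
 * name is illustrative only.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/wireless.h>

static int example_set_wpa_param(int sock, const char *ifname,
				 __u8 name, __u32 value)
{
	struct ipw_param param;		/* userspace copy of the struct above */
	struct iwreq iwr;

	memset(&param, 0, sizeof(param));
	param.cmd = IPW_CMD_SET_WPA_PARAM;
	param.u.wpa_param.name = name;
	param.u.wpa_param.value = value;

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ);
	iwr.u.data.pointer = &param;	/* copied in by ipw_wpa_supplicant() */
	iwr.u.data.length = sizeof(param);

	return ioctl(sock, IPW_IOCTL_WPA_SUPPLICANT, &iwr);
}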
5322static int ipw_wpa_enable(struct ipw_priv *priv, int value)
5323{
5324 struct ieee80211_device *ieee = priv->ieee;
5325 struct ieee80211_security sec = {
5326 .flags = SEC_LEVEL | SEC_ENABLED,
5327 };
5328 int ret = 0;
5329
5330 ieee->wpa_enabled = value;
5331
5332 if (value) {
5333 sec.level = SEC_LEVEL_3;
5334 sec.enabled = 1;
5335 } else {
5336 sec.level = SEC_LEVEL_0;
5337 sec.enabled = 0;
a613bffd 5338 ieee->wpa_ie_len = 0;
ea2b26e0
JK
5339 }
5340
5341 if (ieee->set_security)
5342 ieee->set_security(ieee->dev, &sec);
5343 else
5344 ret = -EOPNOTSUPP;
5345
5346 return ret;
5347}
5348
5349#define AUTH_ALG_OPEN_SYSTEM 0x1
5350#define AUTH_ALG_SHARED_KEY 0x2
5351
5352static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
5353{
5354 struct ieee80211_device *ieee = priv->ieee;
5355 struct ieee80211_security sec = {
5356 .flags = SEC_AUTH_MODE,
5357 };
5358 int ret = 0;
5359
5360 if (value & AUTH_ALG_SHARED_KEY) {
5361 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
5362 ieee->open_wep = 0;
5363 } else {
5364 sec.auth_mode = WLAN_AUTH_OPEN;
5365 ieee->open_wep = 1;
5366 }
5367
5368 if (ieee->set_security)
5369 ieee->set_security(ieee->dev, &sec);
5370 else
5371 ret = -EOPNOTSUPP;
5372
5373 return ret;
5374}
5375
5376static int ipw_wpa_set_param(struct net_device *dev, u8 name, u32 value)
5377{
5378 struct ipw_priv *priv = ieee80211_priv(dev);
a613bffd
JK
5379 struct ieee80211_crypt_data *crypt;
5380 unsigned long flags;
ea2b26e0
JK
5381 int ret = 0;
5382
5383 switch (name) {
5384 case IPW_PARAM_WPA_ENABLED:
5385 ret = ipw_wpa_enable(priv, value);
5386 break;
5387
5388 case IPW_PARAM_TKIP_COUNTERMEASURES:
a613bffd
JK
5389 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
5390 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) {
5391 IPW_WARNING("Can't set TKIP countermeasures: "
5392 "crypt not set!\n");
5393 break;
5394 }
5395
5396 flags = crypt->ops->get_flags(crypt->priv);
5397
5398 if (value)
5399 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
5400 else
5401 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
5402
5403 crypt->ops->set_flags(flags, crypt->priv);
5404
ea2b26e0
JK
5405 break;
5406
5407 case IPW_PARAM_DROP_UNENCRYPTED:
5408 priv->ieee->drop_unencrypted = value;
5409 break;
5410
5411 case IPW_PARAM_PRIVACY_INVOKED:
5412 priv->ieee->privacy_invoked = value;
5413 break;
5414
5415 case IPW_PARAM_AUTH_ALGS:
5416 ret = ipw_wpa_set_auth_algs(priv, value);
5417 break;
5418
5419 case IPW_PARAM_IEEE_802_1X:
5420 priv->ieee->ieee802_1x = value;
5421 break;
5422
5423 default:
5424 IPW_ERROR("%s: Unknown WPA param: %d\n", dev->name, name);
5425 ret = -EOPNOTSUPP;
5426 }
5427
5428 return ret;
5429}
5430
5431static int ipw_wpa_mlme(struct net_device *dev, int command, int reason)
5432{
5433 struct ipw_priv *priv = ieee80211_priv(dev);
5434 int ret = 0;
5435
5436 switch (command) {
5437 case IPW_MLME_STA_DEAUTH:
5438 /* silently ignore */
5439 break;
5440
5441 case IPW_MLME_STA_DISASSOC:
5442 ipw_disassociate(priv);
5443 break;
5444
5445 default:
5446 IPW_ERROR("%s: Unknown MLME request: %d\n", dev->name, command);
5447 ret = -EOPNOTSUPP;
5448 }
5449
5450 return ret;
5451}
5452
5453static int ipw_set_rsn_capa(struct ipw_priv *priv,
5454 char *capabilities, int length)
5455{
5456 struct host_cmd cmd = {
5457 .cmd = IPW_CMD_RSN_CAPABILITIES,
5458 .len = length,
5459 };
5460
5461 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
5462
5463 memcpy(&cmd.param, capabilities, length);
5464 if (ipw_send_cmd(priv, &cmd)) {
5465 IPW_ERROR("failed to send HOST_CMD_RSN_CAPABILITIES command\n");
5466 return -1;
5467 }
5468 return 0;
5469}
5470
5471void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len)
5472{
5473 /* make sure WPA is enabled */
5474 ipw_wpa_enable(priv, 1);
5475
5476 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))
5477 ipw_disassociate(priv);
5478}
5479
5480static int ipw_wpa_set_wpa_ie(struct net_device *dev,
5481 struct ipw_param *param, int plen)
5482{
5483 struct ipw_priv *priv = ieee80211_priv(dev);
5484 struct ieee80211_device *ieee = priv->ieee;
5485 u8 *buf;
5486
5487 if (!ieee->wpa_enabled)
5488 return -EOPNOTSUPP;
5489
5490 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
5491 (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
5492 return -EINVAL;
5493
5494 if (param->u.wpa_ie.len) {
5495 buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
5496 if (buf == NULL)
5497 return -ENOMEM;
5498
5499 memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
5500 kfree(ieee->wpa_ie);
5501 ieee->wpa_ie = buf;
5502 ieee->wpa_ie_len = param->u.wpa_ie.len;
5503 } else {
5504 kfree(ieee->wpa_ie);
5505 ieee->wpa_ie = NULL;
5506 ieee->wpa_ie_len = 0;
5507 }
5508
5509 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
5510 return 0;
5511}
5512
5513/* implementation borrowed from hostap driver */
5514
5515static int ipw_wpa_set_encryption(struct net_device *dev,
5516 struct ipw_param *param, int param_len)
5517{
5518 int ret = 0;
5519 struct ipw_priv *priv = ieee80211_priv(dev);
5520 struct ieee80211_device *ieee = priv->ieee;
5521 struct ieee80211_crypto_ops *ops;
5522 struct ieee80211_crypt_data **crypt;
5523
5524 struct ieee80211_security sec = {
5525 .flags = 0,
5526 };
5527
5528 param->u.crypt.err = 0;
5529 param->u.crypt.alg[IPW_CRYPT_ALG_NAME_LEN - 1] = '\0';
5530
5531 if (param_len !=
5532 (int)((char *)param->u.crypt.key - (char *)param) +
5533 param->u.crypt.key_len) {
5534 IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
5535 param->u.crypt.key_len);
5536 return -EINVAL;
5537 }
5538 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
5539 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
5540 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
5541 if (param->u.crypt.idx >= WEP_KEYS)
5542 return -EINVAL;
5543 crypt = &ieee->crypt[param->u.crypt.idx];
5544 } else {
5545 return -EINVAL;
5546 }
5547
5548 if (strcmp(param->u.crypt.alg, "none") == 0) {
5549 if (crypt) {
5550 sec.enabled = 0;
5551 sec.level = SEC_LEVEL_0;
5552 sec.flags |= SEC_ENABLED | SEC_LEVEL;
5553 ieee80211_crypt_delayed_deinit(ieee, crypt);
5554 }
5555 goto done;
5556 }
5557 sec.enabled = 1;
5558 sec.flags |= SEC_ENABLED;
5559
5560 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5561 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
5562 request_module("ieee80211_crypt_wep");
5563 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5564 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
5565 request_module("ieee80211_crypt_tkip");
5566 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5567 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
5568 request_module("ieee80211_crypt_ccmp");
5569 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
5570 }
5571 if (ops == NULL) {
5572 IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
5573 dev->name, param->u.crypt.alg);
5574 param->u.crypt.err = IPW_CRYPT_ERR_UNKNOWN_ALG;
5575 ret = -EINVAL;
5576 goto done;
5577 }
5578
5579 if (*crypt == NULL || (*crypt)->ops != ops) {
5580 struct ieee80211_crypt_data *new_crypt;
5581
5582 ieee80211_crypt_delayed_deinit(ieee, crypt);
5583
5584 new_crypt = (struct ieee80211_crypt_data *)
5585 kmalloc(sizeof(*new_crypt), GFP_KERNEL);
5586 if (new_crypt == NULL) {
5587 ret = -ENOMEM;
5588 goto done;
5589 }
5590 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
5591 new_crypt->ops = ops;
5592 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
5593 new_crypt->priv =
5594 new_crypt->ops->init(param->u.crypt.idx);
5595
5596 if (new_crypt->priv == NULL) {
5597 kfree(new_crypt);
5598 param->u.crypt.err = IPW_CRYPT_ERR_CRYPT_INIT_FAILED;
5599 ret = -EINVAL;
5600 goto done;
5601 }
5602
5603 *crypt = new_crypt;
5604 }
5605
5606 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
5607 (*crypt)->ops->set_key(param->u.crypt.key,
5608 param->u.crypt.key_len, param->u.crypt.seq,
5609 (*crypt)->priv) < 0) {
5610 IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
5611 param->u.crypt.err = IPW_CRYPT_ERR_KEY_SET_FAILED;
5612 ret = -EINVAL;
5613 goto done;
5614 }
5615
5616 if (param->u.crypt.set_tx) {
5617 ieee->tx_keyidx = param->u.crypt.idx;
5618 sec.active_key = param->u.crypt.idx;
5619 sec.flags |= SEC_ACTIVE_KEY;
5620 }
5621
5622 if (ops->name != NULL) {
5623 if (strcmp(ops->name, "WEP") == 0) {
5624 memcpy(sec.keys[param->u.crypt.idx],
5625 param->u.crypt.key, param->u.crypt.key_len);
5626 sec.key_sizes[param->u.crypt.idx] =
5627 param->u.crypt.key_len;
5628 sec.flags |= (1 << param->u.crypt.idx);
5629 sec.flags |= SEC_LEVEL;
5630 sec.level = SEC_LEVEL_1;
5631 } else if (strcmp(ops->name, "TKIP") == 0) {
5632 sec.flags |= SEC_LEVEL;
5633 sec.level = SEC_LEVEL_2;
5634 } else if (strcmp(ops->name, "CCMP") == 0) {
5635 sec.flags |= SEC_LEVEL;
5636 sec.level = SEC_LEVEL_3;
5637 }
5638 }
5639 done:
5640 if (ieee->set_security)
5641 ieee->set_security(ieee->dev, &sec);
5642
5643 /* Do not reset port if card is in Managed mode since resetting will
5644 * generate new IEEE 802.11 authentication which may end up in looping
5645 * with IEEE 802.1X. If your hardware requires a reset after WEP
5646 * configuration (for example... Prism2), implement the reset_port in
5647 * the callback structures used to initialize the 802.11 stack. */
5648 if (ieee->reset_on_keychange &&
5649 ieee->iw_mode != IW_MODE_INFRA &&
5650 ieee->reset_port && ieee->reset_port(dev)) {
5651 IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
5652 param->u.crypt.err = IPW_CRYPT_ERR_CARD_CONF_FAILED;
5653 return -EINVAL;
5654 }
5655
5656 return ret;
5657}
5658
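/*
 * A short sketch, not driver code: the length check at the top of
 * ipw_wpa_set_encryption() requires the ioctl payload to end exactly at
 * the last key byte, so a caller sizes the request as the offset of the
 * flexible key[] member plus the key length.  Helper name is
 * illustrative only.
 */
static size_t example_crypt_request_len(u16 key_len)
{
	/* matches: (char *)param->u.crypt.key - (char *)param + key_len */
	return offsetof(struct ipw_param, u.crypt.key) + key_len;
}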
5659static int ipw_wpa_supplicant(struct net_device *dev, struct iw_point *p)
5660{
5661 struct ipw_param *param;
5662 int ret = 0;
5663
5664 IPW_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
5665
5666 if (p->length < sizeof(struct ipw_param) || !p->pointer)
5667 return -EINVAL;
5668
5669 param = (struct ipw_param *)kmalloc(p->length, GFP_KERNEL);
5670 if (param == NULL)
5671 return -ENOMEM;
5672
5673 if (copy_from_user(param, p->pointer, p->length)) {
5674 kfree(param);
5675 return -EFAULT;
5676 }
5677
5678 switch (param->cmd) {
5679
5680 case IPW_CMD_SET_WPA_PARAM:
5681 ret = ipw_wpa_set_param(dev, param->u.wpa_param.name,
5682 param->u.wpa_param.value);
5683 break;
5684
5685 case IPW_CMD_SET_WPA_IE:
5686 ret = ipw_wpa_set_wpa_ie(dev, param, p->length);
5687 break;
5688
5689 case IPW_CMD_SET_ENCRYPTION:
5690 ret = ipw_wpa_set_encryption(dev, param, p->length);
5691 break;
5692
5693 case IPW_CMD_MLME:
5694 ret = ipw_wpa_mlme(dev, param->u.mlme.command,
5695 param->u.mlme.reason_code);
5696 break;
5697
5698 default:
5699 IPW_ERROR("%s: Unknown WPA supplicant request: %d\n",
5700 dev->name, param->cmd);
5701 ret = -EOPNOTSUPP;
5702 }
5703
5704 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
5705 ret = -EFAULT;
5706
5707 kfree(param);
5708 return ret;
5709}
ea2b26e0
JK
5710
5711static int ipw_associate_network(struct ipw_priv *priv,
5712 struct ieee80211_network *network,
5713 struct ipw_supported_rates *rates, int roaming)
5714{
5715 int err;
5716
5717 if (priv->config & CFG_FIXED_RATE)
5718 ipw_set_fixed_rate(priv, network);
5719
5720 if (!(priv->config & CFG_STATIC_ESSID)) {
5721 priv->essid_len = min(network->ssid_len,
5722 (u8) IW_ESSID_MAX_SIZE);
5723 memcpy(priv->essid, network->ssid, priv->essid_len);
5724 }
5725
5726 network->last_associate = jiffies;
5727
5728 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
5729 priv->assoc_request.channel = network->channel;
5730 if ((priv->capability & CAP_PRIVACY_ON) &&
5731 (priv->capability & CAP_SHARED_KEY)) {
5732 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
5733 priv->assoc_request.auth_key = priv->sec.active_key;
5734 } else {
5735 priv->assoc_request.auth_type = AUTH_OPEN;
5736 priv->assoc_request.auth_key = 0;
5737 }
5738
5739 if (priv->capability & CAP_PRIVACY_ON)
5740 ipw_send_wep_keys(priv);
5741
a613bffd 5742 if (priv->ieee->wpa_ie_len) {
ea2b26e0
JK
5743 priv->assoc_request.policy_support = 0x02; /* RSN active */
5744 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
5745 priv->ieee->wpa_ie_len);
5746 }
ea2b26e0
JK
5747
5748 /*
5749 * It is valid for our ieee device to support multiple modes, but
5750 * when it comes to associating to a given network we have to choose
5751 * just one mode.
5752 */
5753 if (network->mode & priv->ieee->mode & IEEE_A)
5754 priv->assoc_request.ieee_mode = IPW_A_MODE;
5755 else if (network->mode & priv->ieee->mode & IEEE_G)
5756 priv->assoc_request.ieee_mode = IPW_G_MODE;
5757 else if (network->mode & priv->ieee->mode & IEEE_B)
5758 priv->assoc_request.ieee_mode = IPW_B_MODE;
5759
5760 priv->assoc_request.capability = network->capability;
5761 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
5762 && !(priv->config & CFG_PREAMBLE_LONG)) {
5763 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
5764 } else {
5765 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
5766
5767 /* Clear the short preamble if we won't be supporting it */
5768 priv->assoc_request.capability &=
5769 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
5770 }
5771
5772 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
5773 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
5774 roaming ? "Rea" : "A",
5775 escape_essid(priv->essid, priv->essid_len),
5776 network->channel,
5777 ipw_modes[priv->assoc_request.ieee_mode],
5778 rates->num_rates,
5779 (priv->assoc_request.preamble_length ==
5780 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
5781 network->capability &
5782 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
5783 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
5784 priv->capability & CAP_PRIVACY_ON ?
5785 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
5786 "(open)") : "",
5787 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
5788 priv->capability & CAP_PRIVACY_ON ?
5789 '1' + priv->sec.active_key : '.',
5790 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
5791
5792 priv->assoc_request.beacon_interval = network->beacon_interval;
5793 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
5794 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
5795 priv->assoc_request.assoc_type = HC_IBSS_START;
5796 priv->assoc_request.assoc_tsf_msw = 0;
5797 priv->assoc_request.assoc_tsf_lsw = 0;
5798 } else {
5799 if (unlikely(roaming))
5800 priv->assoc_request.assoc_type = HC_REASSOCIATE;
5801 else
5802 priv->assoc_request.assoc_type = HC_ASSOCIATE;
5803 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
5804 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
5805 }
5806
5807 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
5808
5809 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5810 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
5811 priv->assoc_request.atim_window = network->atim_window;
5812 } else {
5813 memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
5814 priv->assoc_request.atim_window = 0;
5815 }
5816
5817 priv->assoc_request.listen_interval = network->listen_interval;
5818
5819 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5820 if (err) {
5821 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5822 return err;
5823 }
5824
5825 rates->ieee_mode = priv->assoc_request.ieee_mode;
5826 rates->purpose = IPW_RATE_CONNECT;
5827 ipw_send_supported_rates(priv, rates);
5828
5829 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
5830 priv->sys_config.dot11g_auto_detection = 1;
5831 else
5832 priv->sys_config.dot11g_auto_detection = 0;
5833 err = ipw_send_system_config(priv, &priv->sys_config);
5834 if (err) {
5835 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
5836 return err;
5837 }
5838
5839 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
5840 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
5841 if (err) {
5842 IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
5843 return err;
5844 }
5845
5846 /*
5847 * If preemption is enabled, it is possible for the association
5848 * to complete before we return from ipw_send_associate. Therefore
5849 * we have to be sure to update our private data first.
5850 */
5851 priv->channel = network->channel;
5852 memcpy(priv->bssid, network->bssid, ETH_ALEN);
5853 priv->status |= STATUS_ASSOCIATING;
5854 priv->status &= ~STATUS_SECURITY_UPDATED;
5855
5856 priv->assoc_network = network;
5857
5858 err = ipw_send_associate(priv, &priv->assoc_request);
5859 if (err) {
43f66a6c
JK
5860 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
5861 return err;
5862 }
bf79451e
JG
5863
5864 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
43f66a6c
JK
5865 escape_essid(priv->essid, priv->essid_len),
5866 MAC_ARG(priv->bssid));
5867
5868 return 0;
5869}
5870
5871static void ipw_roam(void *data)
5872{
5873 struct ipw_priv *priv = data;
5874 struct ieee80211_network *network = NULL;
5875 struct ipw_network_match match = {
5876 .network = priv->assoc_network
5877 };
5878
5879 /* The roaming process is as follows:
bf79451e
JG
5880 *
5881 * 1. Missed beacon threshold triggers the roaming process by
43f66a6c
JK
5882 * setting the status ROAM bit and requesting a scan.
5883 * 2. When the scan completes, it schedules the ROAM work
5884 * 3. The ROAM work looks at all of the known networks for one that
5885 * is a better network than the one currently associated. If none
5886 * is found, the ROAM process is over (ROAM bit cleared).
5887 * 4. If a better network is found, a disassociation request is
5888 * sent.
5889 * 5. When the disassociation completes, the roam work is again
5890 * scheduled. The second time through, the driver is no longer
5891 * associated, and the newly selected network is sent an
bf79451e 5892 * association request.
43f66a6c
JK
5893 * 6. At this point, the roaming process is complete and the ROAM
5894 * status bit is cleared.
5895 */
5896
5897 /* If we are no longer associated, and the roaming bit is no longer
5898 * set, then we are not actively roaming, so just return */
5899 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
5900 return;
bf79451e 5901
43f66a6c 5902 if (priv->status & STATUS_ASSOCIATED) {
bf79451e 5903 /* First pass through ROAM process -- look for a better
43f66a6c 5904 * network */
a613bffd 5905 unsigned long flags;
43f66a6c
JK
5906 u8 rssi = priv->assoc_network->stats.rssi;
5907 priv->assoc_network->stats.rssi = -128;
a613bffd 5908 spin_lock_irqsave(&priv->ieee->lock, flags);
43f66a6c
JK
5909 list_for_each_entry(network, &priv->ieee->network_list, list) {
5910 if (network != priv->assoc_network)
5911 ipw_best_network(priv, &match, network, 1);
5912 }
a613bffd 5913 spin_unlock_irqrestore(&priv->ieee->lock, flags);
43f66a6c 5914 priv->assoc_network->stats.rssi = rssi;
bf79451e 5915
43f66a6c
JK
5916 if (match.network == priv->assoc_network) {
5917 IPW_DEBUG_ASSOC("No better APs in this network to "
5918 "roam to.\n");
5919 priv->status &= ~STATUS_ROAMING;
5920 ipw_debug_config(priv);
5921 return;
5922 }
bf79451e 5923
43f66a6c
JK
5924 ipw_send_disassociate(priv, 1);
5925 priv->assoc_network = match.network;
5926
5927 return;
bf79451e 5928 }
43f66a6c
JK
5929
5930 /* Second pass through ROAM process -- request association */
5931 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
5932 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
5933 priv->status &= ~STATUS_ROAMING;
5934}
5935
5936static void ipw_associate(void *data)
5937{
5938 struct ipw_priv *priv = data;
5939
5940 struct ieee80211_network *network = NULL;
5941 struct ipw_network_match match = {
5942 .network = NULL
5943 };
5944 struct ipw_supported_rates *rates;
5945 struct list_head *element;
a613bffd 5946 unsigned long flags;
43f66a6c
JK
5947
5948 if (!(priv->config & CFG_ASSOCIATE) &&
5949 !(priv->config & (CFG_STATIC_ESSID |
0edd5b44 5950 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
43f66a6c
JK
5951 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
5952 return;
5953 }
5954
a613bffd
JK
5955 /* Protect our use of the network_list */
5956 spin_lock_irqsave(&priv->ieee->lock, flags);
bf79451e 5957 list_for_each_entry(network, &priv->ieee->network_list, list)
0edd5b44 5958 ipw_best_network(priv, &match, network, 0);
43f66a6c
JK
5959
5960 network = match.network;
5961 rates = &match.rates;
5962
5963 if (network == NULL &&
5964 priv->ieee->iw_mode == IW_MODE_ADHOC &&
5965 priv->config & CFG_ADHOC_CREATE &&
5966 priv->config & CFG_STATIC_ESSID &&
a613bffd 5967 priv->config & CFG_STATIC_CHANNEL &&
43f66a6c
JK
5968 !list_empty(&priv->ieee->network_free_list)) {
5969 element = priv->ieee->network_free_list.next;
0edd5b44 5970 network = list_entry(element, struct ieee80211_network, list);
43f66a6c
JK
5971 ipw_adhoc_create(priv, network);
5972 rates = &priv->rates;
5973 list_del(element);
5974 list_add_tail(&network->list, &priv->ieee->network_list);
5975 }
a613bffd 5976 spin_unlock_irqrestore(&priv->ieee->lock, flags);
bf79451e 5977
43f66a6c
JK
5978 /* If we reached the end of the list, then we don't have any valid
5979 * matching APs */
5980 if (!network) {
5981 ipw_debug_config(priv);
5982
a613bffd
JK
5983 if (!(priv->status & STATUS_SCANNING))
5984 queue_delayed_work(priv->workqueue, &priv->request_scan,
5985 SCAN_INTERVAL);
bf79451e 5986
43f66a6c
JK
5987 return;
5988 }
5989
5990 ipw_associate_network(priv, network, rates, 0);
5991}
bf79451e
JG
5992
5993static inline void ipw_handle_data_packet(struct ipw_priv *priv,
0edd5b44
JG
5994 struct ipw_rx_mem_buffer *rxb,
5995 struct ieee80211_rx_stats *stats)
43f66a6c
JK
5996{
5997 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
5998
5999 /* We received data from the HW, so stop the watchdog */
6000 priv->net_dev->trans_start = jiffies;
6001
bf79451e 6002 /* We only process data packets if the
43f66a6c 6003 * interface is open */
a613bffd 6004 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
43f66a6c
JK
6005 skb_tailroom(rxb->skb))) {
6006 priv->ieee->stats.rx_errors++;
6007 priv->wstats.discard.misc++;
6008 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
6009 return;
6010 } else if (unlikely(!netif_running(priv->net_dev))) {
6011 priv->ieee->stats.rx_dropped++;
6012 priv->wstats.discard.misc++;
6013 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
6014 return;
6015 }
6016
6017 /* Advance skb->data to the start of the actual payload */
aaa4d308 6018 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
43f66a6c
JK
6019
6020 /* Set the size of the skb to the size of the frame */
a613bffd 6021 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
43f66a6c
JK
6022
6023 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
6024
bf79451e 6025 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
43f66a6c 6026 priv->ieee->stats.rx_errors++;
a613bffd 6027 else { /* ieee80211_rx succeeded, so it now owns the SKB */
43f66a6c 6028 rxb->skb = NULL;
a613bffd
JK
6029 ipw_led_activity_on(priv);
6030 }
43f66a6c
JK
6031}
6032
ea2b26e0
JK
6033static inline int is_network_packet(struct ipw_priv *priv,
6034 struct ieee80211_hdr_4addr *header)
6035{
6036 /* Filter incoming packets to determine if they are targeted toward
6037 * this network, discarding packets coming from ourselves */
6038 switch (priv->ieee->iw_mode) {
a613bffd
JK
6039 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
6040 /* {broad,multi}cast packets to our IBSS go through */
ea2b26e0
JK
6041 if (is_broadcast_ether_addr(header->addr1) ||
6042 is_multicast_ether_addr(header->addr1))
6043 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
a613bffd
JK
6044
6045 /* packets to our adapter go through */
6046 return !memcmp(header->addr1, priv->net_dev->dev_addr,
6047 ETH_ALEN);
ea2b26e0 6048 break;
a613bffd
JK
6049
6050 case IW_MODE_INFRA: /* Header: Dest. | AP{BSSID} | Source */
6051 /* {broad,multi}cast packets to our BSS go through */
6052 if (is_broadcast_ether_addr(header->addr1) ||
6053 is_multicast_ether_addr(header->addr1))
6054 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
6055
6056 /* packets to our adapter go through */
6057 return !memcmp(header->addr1, priv->net_dev->dev_addr,
6058 ETH_ALEN);
ea2b26e0
JK
6059 break;
6060 }
a613bffd 6061
ea2b26e0
JK
6062 return 1;
6063}
6064
43f66a6c
JK
6065/*
6066 * Main entry function for receiving a packet with 802.11 headers. This
6067 * should be called whenever the FW has notified us that there is a new
6068 * skb in the receive queue.
6069 */
6070static void ipw_rx(struct ipw_priv *priv)
6071{
6072 struct ipw_rx_mem_buffer *rxb;
6073 struct ipw_rx_packet *pkt;
0dacca1f 6074 struct ieee80211_hdr_4addr *header;
43f66a6c
JK
6075 u32 r, w, i;
6076 u8 network_packet;
6077
6078 r = ipw_read32(priv, CX2_RX_READ_INDEX);
6079 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
6080 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
6081
6082 while (i != r) {
6083 rxb = priv->rxq->queue[i];
6084#ifdef CONFIG_IPW_DEBUG
6085 if (unlikely(rxb == NULL)) {
6086 printk(KERN_CRIT "Queue not allocated!\n");
6087 break;
6088 }
6089#endif
6090 priv->rxq->queue[i] = NULL;
6091
6092 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
bf79451e 6093 CX2_RX_BUF_SIZE,
43f66a6c
JK
6094 PCI_DMA_FROMDEVICE);
6095
6096 pkt = (struct ipw_rx_packet *)rxb->skb->data;
6097 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
6098 pkt->header.message_type,
0edd5b44 6099 pkt->header.rx_seq_num, pkt->header.control_bits);
43f66a6c
JK
6100
6101 switch (pkt->header.message_type) {
0edd5b44
JG
6102 case RX_FRAME_TYPE: /* 802.11 frame */ {
6103 struct ieee80211_rx_stats stats = {
6104 .rssi = pkt->u.frame.rssi_dbm -
6105 IPW_RSSI_TO_DBM,
a613bffd 6106 /* .signal = le16_to_cpu(pkt->u.frame.signal), */
0edd5b44
JG
6107 .rate = pkt->u.frame.rate,
6108 .mac_time = jiffies,
6109 .received_channel =
6110 pkt->u.frame.received_channel,
6111 .freq =
6112 (pkt->u.frame.
6113 control & (1 << 0)) ?
6114 IEEE80211_24GHZ_BAND :
6115 IEEE80211_52GHZ_BAND,
a613bffd 6116 .len = le16_to_cpu(pkt->u.frame.length),
0edd5b44
JG
6117 };
6118
6119 if (stats.rssi != 0)
6120 stats.mask |= IEEE80211_STATMASK_RSSI;
6121 if (stats.signal != 0)
6122 stats.mask |= IEEE80211_STATMASK_SIGNAL;
6123 if (stats.rate != 0)
6124 stats.mask |= IEEE80211_STATMASK_RATE;
6125
6126 priv->rx_packets++;
43f66a6c 6127
ea2b26e0 6128#ifdef CONFIG_IPW_MONITOR
0edd5b44
JG
6129 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6130 ipw_handle_data_packet(priv, rxb,
6131 &stats);
6132 break;
6133 }
43f66a6c 6134#endif
bf79451e 6135
0edd5b44 6136 header =
0dacca1f
JK
6137 (struct ieee80211_hdr_4addr *)(rxb->skb->
6138 data +
6139 IPW_RX_FRAME_SIZE);
43f66a6c
JK
6140 /* TODO: Check Ad-Hoc dest/source and make sure
6141 * that we are actually parsing these packets
bf79451e 6142 * correctly -- we should probably use the
43f66a6c
JK
6143 * frame control of the packet and disregard
6144 * the current iw_mode */
0edd5b44 6145
ea2b26e0
JK
6146 network_packet =
6147 is_network_packet(priv, header);
0edd5b44
JG
6148 if (network_packet && priv->assoc_network) {
6149 priv->assoc_network->stats.rssi =
6150 stats.rssi;
6151 average_add(&priv->average_rssi,
6152 stats.rssi);
6153 priv->last_rx_rssi = stats.rssi;
6154 }
6155
6156 IPW_DEBUG_RX("Frame: len=%u\n",
a613bffd 6157 le16_to_cpu(pkt->u.frame.length));
0edd5b44 6158
a613bffd
JK
6159 if (le16_to_cpu(pkt->u.frame.length) <
6160 frame_hdr_len(header)) {
0edd5b44
JG
6161 IPW_DEBUG_DROP
6162 ("Received packet is too small. "
6163 "Dropping.\n");
6164 priv->ieee->stats.rx_errors++;
6165 priv->wstats.discard.misc++;
6166 break;
6167 }
6168
a613bffd
JK
6169 switch (WLAN_FC_GET_TYPE
6170 (le16_to_cpu(header->frame_ctl))) {
0edd5b44
JG
6171 case IEEE80211_FTYPE_MGMT:
6172 ieee80211_rx_mgt(priv->ieee, header,
6173 &stats);
6174 if (priv->ieee->iw_mode == IW_MODE_ADHOC
6175 &&
6176 ((WLAN_FC_GET_STYPE
a613bffd
JK
6177 (le16_to_cpu(header->frame_ctl))
6178 == IEEE80211_STYPE_PROBE_RESP)
0edd5b44
JG
6179 ||
6180 (WLAN_FC_GET_STYPE
a613bffd
JK
6181 (le16_to_cpu(header->frame_ctl))
6182 == IEEE80211_STYPE_BEACON))
0edd5b44
JG
6183 && !memcmp(header->addr3,
6184 priv->bssid, ETH_ALEN))
6185 ipw_add_station(priv,
6186 header->addr2);
6187 break;
6188
6189 case IEEE80211_FTYPE_CTL:
6190 break;
6191
6192 case IEEE80211_FTYPE_DATA:
6193 if (network_packet)
6194 ipw_handle_data_packet(priv,
6195 rxb,
6196 &stats);
6197 else
6198 IPW_DEBUG_DROP("Dropping: "
6199 MAC_FMT ", "
6200 MAC_FMT ", "
6201 MAC_FMT "\n",
6202 MAC_ARG(header->
6203 addr1),
6204 MAC_ARG(header->
6205 addr2),
6206 MAC_ARG(header->
6207 addr3));
6208 break;
6209 }
43f66a6c
JK
6210 break;
6211 }
bf79451e 6212
0edd5b44
JG
6213 case RX_HOST_NOTIFICATION_TYPE:{
6214 IPW_DEBUG_RX
6215 ("Notification: subtype=%02X flags=%02X size=%d\n",
43f66a6c
JK
6216 pkt->u.notification.subtype,
6217 pkt->u.notification.flags,
6218 pkt->u.notification.size);
0edd5b44
JG
6219 ipw_rx_notification(priv, &pkt->u.notification);
6220 break;
6221 }
43f66a6c
JK
6222
6223 default:
6224 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
6225 pkt->header.message_type);
6226 break;
6227 }
bf79451e
JG
6228
6229 /* For now we just don't re-use anything. We can tweak this
6230 * later to try and re-use notification packets and SKBs that
43f66a6c
JK
6231 * fail to Rx correctly */
6232 if (rxb->skb != NULL) {
6233 dev_kfree_skb_any(rxb->skb);
6234 rxb->skb = NULL;
6235 }
bf79451e 6236
43f66a6c
JK
6237 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
6238 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
6239 list_add_tail(&rxb->list, &priv->rxq->rx_used);
bf79451e 6240
43f66a6c
JK
6241 i = (i + 1) % RX_QUEUE_SIZE;
6242 }
6243
6244 /* Backtrack one entry */
6245 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
6246
6247 ipw_rx_queue_restock(priv);
6248}
6249
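/*
 * A sketch, not driver code: the read-index bookkeeping used by ipw_rx()
 * above.  'processed' always names the last ring slot the driver has
 * consumed, so the walk starts one slot past it and, when it catches up
 * with the hardware read index, steps back one slot modulo the ring
 * size.  The helper name is illustrative only.
 */
static u32 example_backtrack_one(u32 i)
{
	/* equivalent to (i + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE */
	return (i ? i : RX_QUEUE_SIZE) - 1;
}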
43f66a6c
JK
6250/*
6251 * This file defines the Wireless Extension handlers. It does not
6252 * define any methods of hardware manipulation and relies on the
6253 * functions defined in ipw_main to provide the HW interaction.
bf79451e
JG
6254 *
6255 * The exception to this is the use of the ipw_get_ordinal()
43f66a6c
JK
6256 * function used to poll the hardware vs. making unnecessary calls.
6257 *
6258 */
6259
bf79451e
JG
6260static int ipw_wx_get_name(struct net_device *dev,
6261 struct iw_request_info *info,
43f66a6c
JK
6262 union iwreq_data *wrqu, char *extra)
6263{
6264 struct ipw_priv *priv = ieee80211_priv(dev);
a613bffd
JK
6265 if (priv->status & STATUS_RF_KILL_MASK) {
6266 strcpy(wrqu->name, "radio off");
6267 } else if (!(priv->status & STATUS_ASSOCIATED))
43f66a6c 6268 strcpy(wrqu->name, "unassociated");
bf79451e 6269 else
43f66a6c
JK
6270 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
6271 ipw_modes[priv->assoc_request.ieee_mode]);
6272 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
6273 return 0;
6274}
6275
6276static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
6277{
6278 if (channel == 0) {
6279 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
6280 priv->config &= ~CFG_STATIC_CHANNEL;
6281 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
6282 STATUS_ASSOCIATING))) {
6283 IPW_DEBUG_ASSOC("Attempting to associate with new "
6284 "parameters.\n");
6285 ipw_associate(priv);
6286 }
6287
6288 return 0;
6289 }
6290
6291 priv->config |= CFG_STATIC_CHANNEL;
6292
6293 if (priv->channel == channel) {
0edd5b44
JG
6294 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
6295 channel);
43f66a6c
JK
6296 return 0;
6297 }
6298
6299 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
6300 priv->channel = channel;
6301
6302 /* If we are currently associated, or trying to associate
6303 * then see if this is a new channel (causing us to disassociate) */
6304 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6305 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
6306 ipw_disassociate(priv);
a613bffd 6307 } else if (!(priv->status & (STATUS_SCANNING)))
43f66a6c 6308 ipw_associate(priv);
43f66a6c
JK
6309
6310 return 0;
6311}
6312
bf79451e
JG
6313static int ipw_wx_set_freq(struct net_device *dev,
6314 struct iw_request_info *info,
6315 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
6316{
6317 struct ipw_priv *priv = ieee80211_priv(dev);
6318 struct iw_freq *fwrq = &wrqu->freq;
bf79451e 6319
43f66a6c
JK
6320 /* if setting by freq convert to channel */
6321 if (fwrq->e == 1) {
0edd5b44 6322 if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
43f66a6c
JK
6323 int f = fwrq->m / 100000;
6324 int c = 0;
bf79451e 6325
43f66a6c
JK
6326 while ((c < REG_MAX_CHANNEL) &&
6327 (f != ipw_frequencies[c]))
6328 c++;
bf79451e 6329
43f66a6c
JK
6330 /* hack to fall through */
6331 fwrq->e = 0;
6332 fwrq->m = c + 1;
6333 }
6334 }
bf79451e
JG
6335
6336 if (fwrq->e > 0 || fwrq->m > 1000)
43f66a6c
JK
6337 return -EOPNOTSUPP;
6338
6339 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
0edd5b44 6340 return ipw_set_channel(priv, (u8) fwrq->m);
43f66a6c
JK
6341}
6342
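/*
 * A worked example, not driver code: Wireless Extensions encode a
 * frequency as m * 10^e Hz.  ipw_wx_set_freq() above accepts e == 1 with
 * m between 2.412e8 and 2.487e8 (i.e. 2.412-2.487 GHz), divides m by
 * 100000 to get MHz, and matches that against ipw_frequencies[]
 * (assumed to hold channel center frequencies in MHz).  So
 * m = 241200000, e = 1  ->  2412 MHz  ->  2.4 GHz channel 1, which is
 * also what ipw_wx_get_range() reports with .m = 2412 * 100000, .e = 1.
 */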
bf79451e
JG
6343static int ipw_wx_get_freq(struct net_device *dev,
6344 struct iw_request_info *info,
43f66a6c
JK
6345 union iwreq_data *wrqu, char *extra)
6346{
6347 struct ipw_priv *priv = ieee80211_priv(dev);
6348
6349 wrqu->freq.e = 0;
6350
6351 /* If we are associated, trying to associate, or have a statically
6352 * configured CHANNEL then return that; otherwise return ANY */
6353 if (priv->config & CFG_STATIC_CHANNEL ||
6354 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
6355 wrqu->freq.m = priv->channel;
bf79451e 6356 else
43f66a6c
JK
6357 wrqu->freq.m = 0;
6358
6359 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
6360 return 0;
6361}
6362
bf79451e
JG
6363static int ipw_wx_set_mode(struct net_device *dev,
6364 struct iw_request_info *info,
43f66a6c
JK
6365 union iwreq_data *wrqu, char *extra)
6366{
6367 struct ipw_priv *priv = ieee80211_priv(dev);
6368 int err = 0;
6369
6370 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
6371
6372 if (wrqu->mode == priv->ieee->iw_mode)
6373 return 0;
6374
6375 switch (wrqu->mode) {
ea2b26e0 6376#ifdef CONFIG_IPW_MONITOR
43f66a6c
JK
6377 case IW_MODE_MONITOR:
6378#endif
6379 case IW_MODE_ADHOC:
6380 case IW_MODE_INFRA:
6381 break;
6382 case IW_MODE_AUTO:
6383 wrqu->mode = IW_MODE_INFRA;
6384 break;
6385 default:
6386 return -EINVAL;
6387 }
6388
ea2b26e0 6389#ifdef CONFIG_IPW_MONITOR
bf79451e 6390 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
43f66a6c 6391 priv->net_dev->type = ARPHRD_ETHER;
bf79451e
JG
6392
6393 if (wrqu->mode == IW_MODE_MONITOR)
43f66a6c 6394 priv->net_dev->type = ARPHRD_IEEE80211;
ea2b26e0 6395#endif /* CONFIG_IPW_MONITOR */
bf79451e 6396
43f66a6c 6397#ifdef CONFIG_PM
bf79451e 6398 /* Free the existing firmware and reset the fw_loaded
43f66a6c 6399 * flag so ipw_load() will bring in the new firmware */
a613bffd 6400 if (fw_loaded)
43f66a6c 6401 fw_loaded = 0;
43f66a6c
JK
6402
6403 release_firmware(bootfw);
6404 release_firmware(ucode);
6405 release_firmware(firmware);
6406 bootfw = ucode = firmware = NULL;
6407#endif
6408
6409 priv->ieee->iw_mode = wrqu->mode;
a613bffd 6410 queue_work(priv->workqueue, &priv->adapter_restart);
bf79451e 6411
0edd5b44 6412 return err;
43f66a6c
JK
6413}
6414
bf79451e 6415static int ipw_wx_get_mode(struct net_device *dev,
0edd5b44
JG
6416 struct iw_request_info *info,
6417 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
6418{
6419 struct ipw_priv *priv = ieee80211_priv(dev);
6420
6421 wrqu->mode = priv->ieee->iw_mode;
6422 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
6423
6424 return 0;
6425}
6426
43f66a6c
JK
6427#define DEFAULT_RTS_THRESHOLD 2304U
6428#define MIN_RTS_THRESHOLD 1U
6429#define MAX_RTS_THRESHOLD 2304U
6430#define DEFAULT_BEACON_INTERVAL 100U
6431#define DEFAULT_SHORT_RETRY_LIMIT 7U
6432#define DEFAULT_LONG_RETRY_LIMIT 4U
6433
6434/* Values are in microseconds */
6435static const s32 timeout_duration[] = {
6436 350000,
6437 250000,
6438 75000,
6439 37000,
6440 25000,
6441};
6442
6443static const s32 period_duration[] = {
6444 400000,
6445 700000,
6446 1000000,
6447 1000000,
6448 1000000
6449};
6450
bf79451e
JG
6451static int ipw_wx_get_range(struct net_device *dev,
6452 struct iw_request_info *info,
43f66a6c
JK
6453 union iwreq_data *wrqu, char *extra)
6454{
6455 struct ipw_priv *priv = ieee80211_priv(dev);
6456 struct iw_range *range = (struct iw_range *)extra;
6457 u16 val;
6458 int i;
6459
6460 wrqu->data.length = sizeof(*range);
6461 memset(range, 0, sizeof(*range));
6462
6463 /* 54 Mbps == ~27 Mb/s real (802.11g) */
bf79451e 6464 range->throughput = 27 * 1000 * 1000;
43f66a6c
JK
6465
6466 range->max_qual.qual = 100;
6467 /* TODO: Find real max RSSI and stick here */
6468 range->max_qual.level = 0;
6469 range->max_qual.noise = 0;
0edd5b44 6470 range->max_qual.updated = 7; /* Updated all three */
43f66a6c
JK
6471
6472 range->avg_qual.qual = 70;
6473 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
0edd5b44 6474 range->avg_qual.level = 0; /* FIXME to real average level */
43f66a6c 6475 range->avg_qual.noise = 0;
0edd5b44 6476 range->avg_qual.updated = 7; /* Updated all three */
43f66a6c 6477
0edd5b44 6478 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
43f66a6c 6479
bf79451e
JG
6480 for (i = 0; i < range->num_bitrates; i++)
6481 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
0edd5b44 6482 500000;
bf79451e 6483
43f66a6c
JK
6484 range->max_rts = DEFAULT_RTS_THRESHOLD;
6485 range->min_frag = MIN_FRAG_THRESHOLD;
6486 range->max_frag = MAX_FRAG_THRESHOLD;
6487
6488 range->encoding_size[0] = 5;
bf79451e 6489 range->encoding_size[1] = 13;
43f66a6c
JK
6490 range->num_encoding_sizes = 2;
6491 range->max_encoding_tokens = WEP_KEYS;
6492
6493 /* Set the Wireless Extension versions */
6494 range->we_version_compiled = WIRELESS_EXT;
6495 range->we_version_source = 16;
6496
0edd5b44 6497 range->num_channels = FREQ_COUNT;
43f66a6c
JK
6498
6499 val = 0;
6500 for (i = 0; i < FREQ_COUNT; i++) {
6501 range->freq[val].i = i + 1;
6502 range->freq[val].m = ipw_frequencies[i] * 100000;
6503 range->freq[val].e = 1;
6504 val++;
6505
6506 if (val == IW_MAX_FREQUENCIES)
6507 break;
6508 }
6509 range->num_frequency = val;
6510
6511 IPW_DEBUG_WX("GET Range\n");
6512 return 0;
6513}
6514
bf79451e
JG
6515static int ipw_wx_set_wap(struct net_device *dev,
6516 struct iw_request_info *info,
43f66a6c
JK
6517 union iwreq_data *wrqu, char *extra)
6518{
6519 struct ipw_priv *priv = ieee80211_priv(dev);
6520
6521 static const unsigned char any[] = {
6522 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6523 };
6524 static const unsigned char off[] = {
6525 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
6526 };
6527
bf79451e 6528 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
43f66a6c
JK
6529 return -EINVAL;
6530
6531 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
6532 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
6533 /* we disable mandatory BSSID association */
6534 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
6535 priv->config &= ~CFG_STATIC_BSSID;
6536 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
6537 STATUS_ASSOCIATING))) {
6538 IPW_DEBUG_ASSOC("Attempting to associate with new "
6539 "parameters.\n");
6540 ipw_associate(priv);
6541 }
6542
6543 return 0;
6544 }
6545
6546 priv->config |= CFG_STATIC_BSSID;
6547 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
6548 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
6549 return 0;
6550 }
6551
6552 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
6553 MAC_ARG(wrqu->ap_addr.sa_data));
6554
6555 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
6556
6557 /* If we are currently associated, or trying to associate
6558 * then see if this is a new BSSID (causing us to disassociate) */
6559 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6560 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
6561 ipw_disassociate(priv);
a613bffd 6562 } else if (!(priv->status & (STATUS_SCANNING)))
43f66a6c 6563 ipw_associate(priv);
43f66a6c
JK
6564
6565 return 0;
6566}
6567
bf79451e
JG
6568static int ipw_wx_get_wap(struct net_device *dev,
6569 struct iw_request_info *info,
43f66a6c
JK
6570 union iwreq_data *wrqu, char *extra)
6571{
6572 struct ipw_priv *priv = ieee80211_priv(dev);
6573 /* If we are associated, trying to associate, or have a statically
6574 * configured BSSID then return that; otherwise return ANY */
bf79451e 6575 if (priv->config & CFG_STATIC_BSSID ||
43f66a6c
JK
6576 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6577 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
6578 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
6579 } else
6580 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
6581
6582 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
6583 MAC_ARG(wrqu->ap_addr.sa_data));
6584 return 0;
6585}
6586
bf79451e
JG
6587static int ipw_wx_set_essid(struct net_device *dev,
6588 struct iw_request_info *info,
43f66a6c
JK
6589 union iwreq_data *wrqu, char *extra)
6590{
6591 struct ipw_priv *priv = ieee80211_priv(dev);
0edd5b44 6592 char *essid = ""; /* ANY */
43f66a6c 6593 int length = 0;
bf79451e 6594
43f66a6c
JK
6595 if (wrqu->essid.flags && wrqu->essid.length) {
6596 length = wrqu->essid.length - 1;
6597 essid = extra;
6598 }
6599 if (length == 0) {
6600 IPW_DEBUG_WX("Setting ESSID to ANY\n");
6601 priv->config &= ~CFG_STATIC_ESSID;
6602 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
6603 STATUS_ASSOCIATING))) {
6604 IPW_DEBUG_ASSOC("Attempting to associate with new "
6605 "parameters.\n");
6606 ipw_associate(priv);
6607 }
6608
6609 return 0;
6610 }
6611
6612 length = min(length, IW_ESSID_MAX_SIZE);
6613
6614 priv->config |= CFG_STATIC_ESSID;
6615
6616 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
6617 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
6618 return 0;
6619 }
6620
6621 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
6622 length);
6623
6624 priv->essid_len = length;
6625 memcpy(priv->essid, essid, priv->essid_len);
bf79451e 6626
43f66a6c
JK
6627 /* If we are currently associated, or trying to associate
6628 * then see if this is a new ESSID (causing us to disassociate) */
6629 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6630 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
6631 ipw_disassociate(priv);
a613bffd 6632 } else if (!(priv->status & (STATUS_SCANNING)))
43f66a6c 6633 ipw_associate(priv);
43f66a6c
JK
6634
6635 return 0;
6636}
6637
bf79451e
JG
6638static int ipw_wx_get_essid(struct net_device *dev,
6639 struct iw_request_info *info,
43f66a6c
JK
6640 union iwreq_data *wrqu, char *extra)
6641{
6642 struct ipw_priv *priv = ieee80211_priv(dev);
6643
6644 /* If we are associated, trying to associate, or have a statically
6645 * configured ESSID then return that; otherwise return ANY */
6646 if (priv->config & CFG_STATIC_ESSID ||
bf79451e
JG
6647 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6648 IPW_DEBUG_WX("Getting essid: '%s'\n",
43f66a6c 6649 escape_essid(priv->essid, priv->essid_len));
bf79451e 6650 memcpy(extra, priv->essid, priv->essid_len);
43f66a6c 6651 wrqu->essid.length = priv->essid_len;
0edd5b44 6652 wrqu->essid.flags = 1; /* active */
43f66a6c
JK
6653 } else {
6654 IPW_DEBUG_WX("Getting essid: ANY\n");
6655 wrqu->essid.length = 0;
0edd5b44 6656 wrqu->essid.flags = 0; /* active */
43f66a6c
JK
6657 }
6658
6659 return 0;
6660}
6661
bf79451e
JG
6662static int ipw_wx_set_nick(struct net_device *dev,
6663 struct iw_request_info *info,
43f66a6c 6664 union iwreq_data *wrqu, char *extra)
bf79451e 6665{
43f66a6c
JK
6666 struct ipw_priv *priv = ieee80211_priv(dev);
6667
6668 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
6669 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
6670 return -E2BIG;
6671
0edd5b44 6672 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
43f66a6c 6673 memset(priv->nick, 0, sizeof(priv->nick));
0edd5b44 6674 memcpy(priv->nick, extra, wrqu->data.length);
43f66a6c
JK
6675 IPW_DEBUG_TRACE("<<\n");
6676 return 0;
6677
6678}
6679
bf79451e
JG
6680static int ipw_wx_get_nick(struct net_device *dev,
6681 struct iw_request_info *info,
43f66a6c 6682 union iwreq_data *wrqu, char *extra)
bf79451e 6683{
43f66a6c
JK
6684 struct ipw_priv *priv = ieee80211_priv(dev);
6685 IPW_DEBUG_WX("Getting nick\n");
6686 wrqu->data.length = strlen(priv->nick) + 1;
6687 memcpy(extra, priv->nick, wrqu->data.length);
0edd5b44 6688 wrqu->data.flags = 1; /* active */
43f66a6c
JK
6689 return 0;
6690}
6691
43f66a6c
JK
6692static int ipw_wx_set_rate(struct net_device *dev,
6693 struct iw_request_info *info,
6694 union iwreq_data *wrqu, char *extra)
bf79451e 6695{
ea2b26e0
JK
6696 /* TODO: We should use semaphores or locks for access to priv */
6697 struct ipw_priv *priv = ieee80211_priv(dev);
6698 u32 target_rate = wrqu->bitrate.value;
6699 u32 fixed, mask;
6700
6701 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
6702 /* value = X, fixed = 1 means only rate X */
6703 /* value = X, fixed = 0 means all rates lower than or equal to X */
6704
6705 if (target_rate == -1) {
6706 fixed = 0;
6707 mask = IEEE80211_DEFAULT_RATES_MASK;
6708 /* Now we should reassociate */
6709 goto apply;
6710 }
6711
6712 mask = 0;
6713 fixed = wrqu->bitrate.fixed;
6714
6715 if (target_rate == 1000000 || !fixed)
6716 mask |= IEEE80211_CCK_RATE_1MB_MASK;
6717 if (target_rate == 1000000)
6718 goto apply;
6719
6720 if (target_rate == 2000000 || !fixed)
6721 mask |= IEEE80211_CCK_RATE_2MB_MASK;
6722 if (target_rate == 2000000)
6723 goto apply;
6724
6725 if (target_rate == 5500000 || !fixed)
6726 mask |= IEEE80211_CCK_RATE_5MB_MASK;
6727 if (target_rate == 5500000)
6728 goto apply;
6729
6730 if (target_rate == 6000000 || !fixed)
6731 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
6732 if (target_rate == 6000000)
6733 goto apply;
6734
6735 if (target_rate == 9000000 || !fixed)
6736 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
6737 if (target_rate == 9000000)
6738 goto apply;
6739
6740 if (target_rate == 11000000 || !fixed)
6741 mask |= IEEE80211_CCK_RATE_11MB_MASK;
6742 if (target_rate == 11000000)
6743 goto apply;
6744
6745 if (target_rate == 12000000 || !fixed)
6746 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
6747 if (target_rate == 12000000)
6748 goto apply;
6749
6750 if (target_rate == 18000000 || !fixed)
6751 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
6752 if (target_rate == 18000000)
6753 goto apply;
6754
6755 if (target_rate == 24000000 || !fixed)
6756 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
6757 if (target_rate == 24000000)
6758 goto apply;
6759
6760 if (target_rate == 36000000 || !fixed)
6761 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
6762 if (target_rate == 36000000)
6763 goto apply;
6764
6765 if (target_rate == 48000000 || !fixed)
6766 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
6767 if (target_rate == 48000000)
6768 goto apply;
6769
6770 if (target_rate == 54000000 || !fixed)
6771 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
6772 if (target_rate == 54000000)
6773 goto apply;
6774
6775 IPW_DEBUG_WX("invalid rate specified, returning error\n");
6776 return -EINVAL;
6777
6778 apply:
6779 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
6780 mask, fixed ? "fixed" : "sub-rates");
6781
6782 if (mask == IEEE80211_DEFAULT_RATES_MASK)
6783 priv->config &= ~CFG_FIXED_RATE;
6784 else
6785 priv->config |= CFG_FIXED_RATE;
6786
6787 if (priv->rates_mask != mask) {
6788 priv->rates_mask = mask;
6789 /* If we are already associated or are currently trying to
6790 * associate, disassociate and try again */
6791 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
6792 IPW_DEBUG_ASSOC("Disassociating due to RATE change.\n");
6793 ipw_disassociate(priv);
a613bffd
JK
6794 } else if (!(priv->status & (STATUS_SCANNING))) {
6795 /* We are not yet associated, so kick one off... */
6796 ipw_associate(priv);
ea2b26e0 6797 }
ea2b26e0
JK
6798 }
6799
6800 return 0;
43f66a6c
JK
6801}
6802
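/*
 * A sketch, not driver code: the rate-to-mask mapping in
 * ipw_wx_set_rate() above could equally be table driven.  The rates and
 * IEEE80211_*_RATE_*_MASK constants are the ones used above; only the
 * table and helper names are made up, and the target_rate == -1 case is
 * still handled separately as in the original.
 */
static const struct {
	u32 bps;
	u32 mask;
} example_rate_table[] = {
	{ 1000000, IEEE80211_CCK_RATE_1MB_MASK },
	{ 2000000, IEEE80211_CCK_RATE_2MB_MASK },
	{ 5500000, IEEE80211_CCK_RATE_5MB_MASK },
	{ 6000000, IEEE80211_OFDM_RATE_6MB_MASK },
	{ 9000000, IEEE80211_OFDM_RATE_9MB_MASK },
	{ 11000000, IEEE80211_CCK_RATE_11MB_MASK },
	{ 12000000, IEEE80211_OFDM_RATE_12MB_MASK },
	{ 18000000, IEEE80211_OFDM_RATE_18MB_MASK },
	{ 24000000, IEEE80211_OFDM_RATE_24MB_MASK },
	{ 36000000, IEEE80211_OFDM_RATE_36MB_MASK },
	{ 48000000, IEEE80211_OFDM_RATE_48MB_MASK },
	{ 54000000, IEEE80211_OFDM_RATE_54MB_MASK },
};

static int example_rate_to_mask(u32 target, int fixed, u32 *mask)
{
	int i;

	*mask = 0;
	for (i = 0; i < ARRAY_SIZE(example_rate_table); i++) {
		/* 'fixed' selects only the exact rate; otherwise
		 * accumulate every rate up to and including it */
		if (!fixed || example_rate_table[i].bps == target)
			*mask |= example_rate_table[i].mask;
		if (example_rate_table[i].bps == target)
			return 0;
	}
	return -EINVAL;	/* rate not in the table */
}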
bf79451e
JG
6803static int ipw_wx_get_rate(struct net_device *dev,
6804 struct iw_request_info *info,
43f66a6c 6805 union iwreq_data *wrqu, char *extra)
bf79451e 6806{
0edd5b44 6807 struct ipw_priv *priv = ieee80211_priv(dev);
43f66a6c
JK
6808 wrqu->bitrate.value = priv->last_rate;
6809
6810 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
6811 return 0;
6812}
6813
bf79451e
JG
6814static int ipw_wx_set_rts(struct net_device *dev,
6815 struct iw_request_info *info,
43f66a6c 6816 union iwreq_data *wrqu, char *extra)
bf79451e 6817{
43f66a6c
JK
6818 struct ipw_priv *priv = ieee80211_priv(dev);
6819
6820 if (wrqu->rts.disabled)
6821 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
6822 else {
6823 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
6824 wrqu->rts.value > MAX_RTS_THRESHOLD)
6825 return -EINVAL;
bf79451e 6826
43f66a6c
JK
6827 priv->rts_threshold = wrqu->rts.value;
6828 }
6829
6830 ipw_send_rts_threshold(priv, priv->rts_threshold);
6831 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
6832 return 0;
6833}
6834
bf79451e
JG
6835static int ipw_wx_get_rts(struct net_device *dev,
6836 struct iw_request_info *info,
43f66a6c 6837 union iwreq_data *wrqu, char *extra)
bf79451e 6838{
43f66a6c
JK
6839 struct ipw_priv *priv = ieee80211_priv(dev);
6840 wrqu->rts.value = priv->rts_threshold;
6841 wrqu->rts.fixed = 0; /* no auto select */
0edd5b44 6842 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
43f66a6c
JK
6843
6844 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
6845 return 0;
6846}
6847
bf79451e
JG
6848static int ipw_wx_set_txpow(struct net_device *dev,
6849 struct iw_request_info *info,
43f66a6c 6850 union iwreq_data *wrqu, char *extra)
bf79451e 6851{
43f66a6c
JK
6852 struct ipw_priv *priv = ieee80211_priv(dev);
6853 struct ipw_tx_power tx_power;
6854 int i;
6855
6856 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
6857 return -EINPROGRESS;
6858
6859 if (wrqu->power.flags != IW_TXPOW_DBM)
6860 return -EINVAL;
6861
0edd5b44 6862 if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
43f66a6c
JK
6863 return -EINVAL;
6864
6865 priv->tx_power = wrqu->power.value;
6866
6867 memset(&tx_power, 0, sizeof(tx_power));
6868
6869 /* configure device for 'G' band */
6870 tx_power.ieee_mode = IPW_G_MODE;
6871 tx_power.num_channels = 11;
6872 for (i = 0; i < 11; i++) {
6873 tx_power.channels_tx_power[i].channel_number = i + 1;
6874 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
6875 }
6876 if (ipw_send_tx_power(priv, &tx_power))
6877 goto error;
6878
6879 /* configure device to also handle 'B' band */
6880 tx_power.ieee_mode = IPW_B_MODE;
6881 if (ipw_send_tx_power(priv, &tx_power))
6882 goto error;
6883
6884 return 0;
6885
0edd5b44 6886 error:
43f66a6c
JK
6887 return -EIO;
6888}
6889
bf79451e
JG
6890static int ipw_wx_get_txpow(struct net_device *dev,
6891 struct iw_request_info *info,
43f66a6c 6892 union iwreq_data *wrqu, char *extra)
bf79451e 6893{
43f66a6c
JK
6894 struct ipw_priv *priv = ieee80211_priv(dev);
6895
6896 wrqu->power.value = priv->tx_power;
6897 wrqu->power.fixed = 1;
6898 wrqu->power.flags = IW_TXPOW_DBM;
6899 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
6900
bf79451e 6901 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
0edd5b44 6902 wrqu->power.disabled ? "ON" : "OFF", wrqu->power.value);
43f66a6c
JK
6903
6904 return 0;
6905}
6906
bf79451e 6907static int ipw_wx_set_frag(struct net_device *dev,
0edd5b44
JG
6908 struct iw_request_info *info,
6909 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
6910{
6911 struct ipw_priv *priv = ieee80211_priv(dev);
6912
6913 if (wrqu->frag.disabled)
6914 priv->ieee->fts = DEFAULT_FTS;
6915 else {
6916 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
6917 wrqu->frag.value > MAX_FRAG_THRESHOLD)
6918 return -EINVAL;
bf79451e 6919
43f66a6c
JK
6920 priv->ieee->fts = wrqu->frag.value & ~0x1;
6921 }
6922
6923 ipw_send_frag_threshold(priv, wrqu->frag.value);
6924 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
6925 return 0;
6926}
6927
bf79451e 6928static int ipw_wx_get_frag(struct net_device *dev,
0edd5b44
JG
6929 struct iw_request_info *info,
6930 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
6931{
6932 struct ipw_priv *priv = ieee80211_priv(dev);
6933 wrqu->frag.value = priv->ieee->fts;
6934 wrqu->frag.fixed = 0; /* no auto select */
0edd5b44 6935 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
43f66a6c
JK
6936
6937 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
6938
6939 return 0;
6940}
6941
bf79451e
JG
6942static int ipw_wx_set_retry(struct net_device *dev,
6943 struct iw_request_info *info,
43f66a6c 6944 union iwreq_data *wrqu, char *extra)
bf79451e 6945{
43f66a6c 6946 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
bf79451e 6947 return -EOPNOTSUPP;
43f66a6c
JK
6948}
6949
bf79451e
JG
6950static int ipw_wx_get_retry(struct net_device *dev,
6951 struct iw_request_info *info,
43f66a6c 6952 union iwreq_data *wrqu, char *extra)
bf79451e 6953{
43f66a6c 6954 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
bf79451e 6955 return -EOPNOTSUPP;
43f66a6c
JK
6956}
6957
bf79451e
JG
6958static int ipw_wx_set_scan(struct net_device *dev,
6959 struct iw_request_info *info,
43f66a6c
JK
6960 union iwreq_data *wrqu, char *extra)
6961{
6962 struct ipw_priv *priv = ieee80211_priv(dev);
6963 IPW_DEBUG_WX("Start scan\n");
6964 if (ipw_request_scan(priv))
6965 return -EIO;
6966 return 0;
6967}
6968
bf79451e
JG
6969static int ipw_wx_get_scan(struct net_device *dev,
6970 struct iw_request_info *info,
43f66a6c 6971 union iwreq_data *wrqu, char *extra)
bf79451e 6972{
43f66a6c
JK
6973 struct ipw_priv *priv = ieee80211_priv(dev);
6974 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
6975}
6976
bf79451e 6977static int ipw_wx_set_encode(struct net_device *dev,
0edd5b44
JG
6978 struct iw_request_info *info,
6979 union iwreq_data *wrqu, char *key)
43f66a6c
JK
6980{
6981 struct ipw_priv *priv = ieee80211_priv(dev);
6982 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
6983}
6984
bf79451e 6985static int ipw_wx_get_encode(struct net_device *dev,
0edd5b44
JG
6986 struct iw_request_info *info,
6987 union iwreq_data *wrqu, char *key)
43f66a6c
JK
6988{
6989 struct ipw_priv *priv = ieee80211_priv(dev);
6990 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
6991}
6992
bf79451e 6993static int ipw_wx_set_power(struct net_device *dev,
0edd5b44
JG
6994 struct iw_request_info *info,
6995 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
6996{
6997 struct ipw_priv *priv = ieee80211_priv(dev);
6998 int err;
6999
7000 if (wrqu->power.disabled) {
7001 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
7002 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
7003 if (err) {
7004 IPW_DEBUG_WX("failed setting power mode.\n");
7005 return err;
7006 }
43f66a6c
JK
7007 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
7008
7009 return 0;
bf79451e 7010 }
43f66a6c
JK
7011
7012 switch (wrqu->power.flags & IW_POWER_MODE) {
0edd5b44
JG
7013 case IW_POWER_ON: /* If not specified */
7014 case IW_POWER_MODE: /* If set all mask */
7015 case IW_POWER_ALL_R: /* If explicitly stated all */
43f66a6c 7016 break;
0edd5b44 7017 default: /* Otherwise we don't support it */
43f66a6c
JK
7018 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
7019 wrqu->power.flags);
bf79451e 7020 return -EOPNOTSUPP;
43f66a6c 7021 }
bf79451e 7022
43f66a6c
JK
7023 /* If the user hasn't specified a power management mode yet, default
7024 * to BATTERY */
0edd5b44 7025 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
43f66a6c 7026 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
bf79451e 7027 else
43f66a6c
JK
7028 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
7029 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
7030 if (err) {
7031 IPW_DEBUG_WX("failed setting power mode.\n");
7032 return err;
7033 }
7034
0edd5b44 7035 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
bf79451e 7036
43f66a6c
JK
7037 return 0;
7038}
7039
bf79451e 7040static int ipw_wx_get_power(struct net_device *dev,
0edd5b44
JG
7041 struct iw_request_info *info,
7042 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
7043{
7044 struct ipw_priv *priv = ieee80211_priv(dev);
7045
a613bffd 7046 if (!(priv->power_mode & IPW_POWER_ENABLED))
43f66a6c 7047 wrqu->power.disabled = 1;
a613bffd 7048 else
43f66a6c 7049 wrqu->power.disabled = 0;
43f66a6c
JK
7050
7051 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
bf79451e 7052
43f66a6c
JK
7053 return 0;
7054}
7055
bf79451e 7056static int ipw_wx_set_powermode(struct net_device *dev,
0edd5b44
JG
7057 struct iw_request_info *info,
7058 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
7059{
7060 struct ipw_priv *priv = ieee80211_priv(dev);
7061 int mode = *(int *)extra;
7062 int err;
bf79451e 7063
43f66a6c
JK
7064 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
7065 mode = IPW_POWER_AC;
7066 priv->power_mode = mode;
7067 } else {
7068 priv->power_mode = IPW_POWER_ENABLED | mode;
7069 }
bf79451e 7070
43f66a6c
JK
7071 if (priv->power_mode != mode) {
7072 err = ipw_send_power_mode(priv, mode);
bf79451e 7073
43f66a6c
JK
7074 if (err) {
7075 IPW_DEBUG_WX("failed setting power mode.\n");
7076 return err;
7077 }
7078 }
bf79451e 7079
43f66a6c
JK
7080 return 0;
7081}
7082
7083#define MAX_WX_STRING 80
bf79451e 7084static int ipw_wx_get_powermode(struct net_device *dev,
0edd5b44
JG
7085 struct iw_request_info *info,
7086 union iwreq_data *wrqu, char *extra)
43f66a6c
JK
7087{
7088 struct ipw_priv *priv = ieee80211_priv(dev);
7089 int level = IPW_POWER_LEVEL(priv->power_mode);
7090 char *p = extra;
7091
7092 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
7093
7094 switch (level) {
7095 case IPW_POWER_AC:
7096 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
7097 break;
7098 case IPW_POWER_BATTERY:
7099 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
7100 break;
7101 default:
7102 p += snprintf(p, MAX_WX_STRING - (p - extra),
bf79451e 7103 "(Timeout %dms, Period %dms)",
43f66a6c
JK
7104 timeout_duration[level - 1] / 1000,
7105 period_duration[level - 1] / 1000);
7106 }
7107
7108 if (!(priv->power_mode & IPW_POWER_ENABLED))
0edd5b44 7109 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
43f66a6c
JK
7110
7111 wrqu->data.length = p - extra + 1;
7112
7113 return 0;
7114}
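/* Example use of the "set_power"/"get_power" private ioctls via iwpriv; the
 * interface name below is illustrative only:
 *
 *   iwpriv eth1 set_power 3    (select fixed power-save level 3)
 *   iwpriv eth1 set_power 0    (out-of-range value falls back to AC/CAM)
 *   iwpriv eth1 get_power      (reports level plus AC, BATTERY, or timeouts)
 */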
7115
7116static int ipw_wx_set_wireless_mode(struct net_device *dev,
0edd5b44
JG
7117 struct iw_request_info *info,
7118 union iwreq_data *wrqu, char *extra)
43f66a6c 7119{
0edd5b44 7120 struct ipw_priv *priv = ieee80211_priv(dev);
43f66a6c
JK
7121 int mode = *(int *)extra;
7122 u8 band = 0, modulation = 0;
7123
7124 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
0edd5b44 7125 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
43f66a6c
JK
7126 return -EINVAL;
7127 }
bf79451e 7128
43f66a6c 7129 if (priv->adapter == IPW_2915ABG) {
a33a1982 7130 priv->ieee->abg_true = 1;
43f66a6c
JK
7131 if (mode & IEEE_A) {
7132 band |= IEEE80211_52GHZ_BAND;
7133 modulation |= IEEE80211_OFDM_MODULATION;
7134 } else
a33a1982 7135 priv->ieee->abg_true = 0;
43f66a6c
JK
7136 } else {
7137 if (mode & IEEE_A) {
7138 IPW_WARNING("Attempt to set 2200BG into "
7139 "802.11a mode\n");
7140 return -EINVAL;
7141 }
7142
a33a1982 7143 priv->ieee->abg_true = 0;
43f66a6c
JK
7144 }
7145
7146 if (mode & IEEE_B) {
7147 band |= IEEE80211_24GHZ_BAND;
7148 modulation |= IEEE80211_CCK_MODULATION;
7149 } else
a33a1982 7150 priv->ieee->abg_true = 0;
bf79451e 7151
43f66a6c
JK
7152 if (mode & IEEE_G) {
7153 band |= IEEE80211_24GHZ_BAND;
7154 modulation |= IEEE80211_OFDM_MODULATION;
7155 } else
a33a1982 7156 priv->ieee->abg_true = 0;
43f66a6c
JK
7157
7158 priv->ieee->mode = mode;
7159 priv->ieee->freq_band = band;
7160 priv->ieee->modulation = modulation;
0edd5b44 7161 init_supported_rates(priv, &priv->rates);
43f66a6c
JK
7162
7163 /* If we are currently associated, or trying to associate
0edd5b44 7164 * then see if this is a new configuration (causing us to
43f66a6c 7165 * disassociate) */
0edd5b44 7166 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
bf79451e 7167 /* The resulting association will trigger
43f66a6c 7168 * the new rates to be sent to the device */
0edd5b44
JG
7169 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
7170 ipw_disassociate(priv);
43f66a6c
JK
7171 } else
7172 ipw_send_supported_rates(priv, &priv->rates);
7173
a613bffd
JK
7174 /* Update the band LEDs */
7175 ipw_led_band_on(priv);
7176
bf79451e 7177 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
43f66a6c 7178 mode & IEEE_A ? 'a' : '.',
0edd5b44 7179 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
43f66a6c
JK
7180 return 0;
7181}
7182
7183static int ipw_wx_get_wireless_mode(struct net_device *dev,
0edd5b44
JG
7184 struct iw_request_info *info,
7185 union iwreq_data *wrqu, char *extra)
43f66a6c 7186{
0edd5b44 7187 struct ipw_priv *priv = ieee80211_priv(dev);
43f66a6c 7188
ea2b26e0
JK
7189 switch (priv->ieee->mode) {
7190 case IEEE_A:
43f66a6c
JK
7191 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
7192 break;
ea2b26e0
JK
7193 case IEEE_B:
7194 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
7195 break;
7196 case IEEE_A | IEEE_B:
7197 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
7198 break;
7199 case IEEE_G:
7200 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
7201 break;
7202 case IEEE_A | IEEE_G:
7203 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
7204 break;
7205 case IEEE_B | IEEE_G:
7206 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
7207 break;
7208 case IEEE_A | IEEE_B | IEEE_G:
7209 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
7210 break;
7211 default:
7212 strncpy(extra, "unknown", MAX_WX_STRING);
43f66a6c 7213 break;
bf79451e
JG
7214 }
7215
43f66a6c
JK
7216 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
7217
0edd5b44 7218 wrqu->data.length = strlen(extra) + 1;
43f66a6c 7219
0edd5b44 7220 return 0;
43f66a6c
JK
7221}
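/* The argument to the "set_mode" private ioctl is the same bitmask reported
 * above (IEEE_A | IEEE_B | IEEE_G); the interface name below is illustrative:
 *
 *   iwpriv eth1 set_mode 6    (802.11bg)
 *   iwpriv eth1 set_mode 7    (802.11abg, 2915ABG hardware only)
 *   iwpriv eth1 get_mode
 */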
7222
ea2b26e0
JK
7223static int ipw_wx_set_preamble(struct net_device *dev,
7224 struct iw_request_info *info,
7225 union iwreq_data *wrqu, char *extra)
7226{
7227 struct ipw_priv *priv = ieee80211_priv(dev);
7228 int mode = *(int *)extra;
7229
7230 /* Switching from SHORT -> LONG requires a disassociation */
7231 if (mode == 1) {
7232 if (!(priv->config & CFG_PREAMBLE_LONG)) {
7233 priv->config |= CFG_PREAMBLE_LONG;
7234 if (priv->status &
7235 (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7236 IPW_DEBUG_ASSOC
7237 ("Disassociating due to preamble "
7238 "change.\n");
7239 ipw_disassociate(priv);
7240 }
7241 }
7242 goto done;
7243 }
7244
7245 if (mode == 0) {
7246 priv->config &= ~CFG_PREAMBLE_LONG;
7247 goto done;
7248 }
7249
7250 return -EINVAL;
7251
7252 done:
7253 return 0;
7254}
7255
7256static int ipw_wx_get_preamble(struct net_device *dev,
7257 struct iw_request_info *info,
7258 union iwreq_data *wrqu, char *extra)
7259{
7260 struct ipw_priv *priv = ieee80211_priv(dev);
7261
7262 if (priv->config & CFG_PREAMBLE_LONG)
7263 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
7264 else
7265 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
7266
7267 return 0;
7268}
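/* Example use of the preamble private ioctls; interface name illustrative:
 *
 *   iwpriv eth1 set_preamble 1    (force long preamble, may disassociate)
 *   iwpriv eth1 set_preamble 0    (auto, short preamble allowed)
 *   iwpriv eth1 get_preamble
 */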
7269
7270#ifdef CONFIG_IPW_MONITOR
7271static int ipw_wx_set_monitor(struct net_device *dev,
bf79451e 7272 struct iw_request_info *info,
43f66a6c 7273 union iwreq_data *wrqu, char *extra)
bf79451e 7274{
43f66a6c
JK
7275 struct ipw_priv *priv = ieee80211_priv(dev);
7276 int *parms = (int *)extra;
7277 int enable = (parms[0] > 0);
7278
ea2b26e0 7279 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
43f66a6c
JK
7280 if (enable) {
7281 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
7282 priv->net_dev->type = ARPHRD_IEEE80211;
a613bffd 7283 queue_work(priv->workqueue, &priv->adapter_restart);
43f66a6c 7284 }
bf79451e 7285
43f66a6c
JK
7286 ipw_set_channel(priv, parms[1]);
7287 } else {
7288 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
7289 return 0;
7290 priv->net_dev->type = ARPHRD_ETHER;
a613bffd 7291 queue_work(priv->workqueue, &priv->adapter_restart);
43f66a6c
JK
7292 }
7293 return 0;
7294}
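/* Example use of the "monitor" private ioctl; the first argument enables or
 * disables monitor mode and the second selects the channel. Changing the mode
 * queues an adapter restart. Interface name illustrative:
 *
 *   iwpriv eth1 monitor 1 6    (enter monitor mode on channel 6)
 *   iwpriv eth1 monitor 0 0    (return to normal ARPHRD_ETHER operation)
 */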
7295
bf79451e
JG
7296static int ipw_wx_reset(struct net_device *dev,
7297 struct iw_request_info *info,
43f66a6c 7298 union iwreq_data *wrqu, char *extra)
bf79451e 7299{
43f66a6c
JK
7300 struct ipw_priv *priv = ieee80211_priv(dev);
7301 IPW_DEBUG_WX("RESET\n");
a613bffd 7302 queue_work(priv->workqueue, &priv->adapter_restart);
43f66a6c
JK
7303 return 0;
7304}
ea2b26e0 7305#endif				/* CONFIG_IPW_MONITOR */
43f66a6c
JK
7306
7307/* Rebase the WE IOCTLs to zero for the handler array */
7308#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
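/* e.g. IW_IOCTL(SIOCGIWNAME) expands to [SIOCGIWNAME - SIOCSIWCOMMIT], i.e.
 * index 1, so handlers below can be listed by ioctl name rather than by raw
 * array position. */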
0edd5b44 7309static iw_handler ipw_wx_handlers[] = {
ea2b26e0
JK
7310 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
7311 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
7312 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
7313 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
7314 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
7315 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
7316 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
7317 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
7318 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
7319 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
7320 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
7321 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
7322 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
7323 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
7324 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
7325 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
7326 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
7327 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
7328 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
7329 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
7330 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
7331 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
7332 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
7333 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
7334 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
7335 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
7336 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
7337 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
a613bffd
JK
7338 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
7339 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
7340 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
7341 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
43f66a6c
JK
7342};
7343
7344#define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
7345#define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
7346#define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
7347#define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
ea2b26e0
JK
7348#define IPW_PRIV_SET_PREAMBLE SIOCIWFIRSTPRIV+4
7349#define IPW_PRIV_GET_PREAMBLE SIOCIWFIRSTPRIV+5
7350#define IPW_PRIV_SET_MONITOR SIOCIWFIRSTPRIV+6
7351#define IPW_PRIV_RESET SIOCIWFIRSTPRIV+7
43f66a6c 7352
bf79451e 7353static struct iw_priv_args ipw_priv_args[] = {
43f66a6c 7354 {
0edd5b44
JG
7355 .cmd = IPW_PRIV_SET_POWER,
7356 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
7357 .name = "set_power"},
43f66a6c 7358 {
0edd5b44
JG
7359 .cmd = IPW_PRIV_GET_POWER,
7360 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
7361 .name = "get_power"},
43f66a6c 7362 {
0edd5b44
JG
7363 .cmd = IPW_PRIV_SET_MODE,
7364 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
7365 .name = "set_mode"},
43f66a6c 7366 {
0edd5b44
JG
7367 .cmd = IPW_PRIV_GET_MODE,
7368 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
7369 .name = "get_mode"},
43f66a6c 7370 {
ea2b26e0
JK
7371 .cmd = IPW_PRIV_SET_PREAMBLE,
7372 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
7373 .name = "set_preamble"},
7374 {
7375 .cmd = IPW_PRIV_GET_PREAMBLE,
7376 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
7377 .name = "get_preamble"},
7378#ifdef CONFIG_IPW_MONITOR
7379 {
7380 IPW_PRIV_SET_MONITOR,
0edd5b44 7381 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
43f66a6c 7382 {
0edd5b44
JG
7383 IPW_PRIV_RESET,
7384 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
ea2b26e0 7385#endif /* CONFIG_IPW_MONITOR */
43f66a6c
JK
7386};
7387
7388static iw_handler ipw_priv_handler[] = {
7389 ipw_wx_set_powermode,
7390 ipw_wx_get_powermode,
7391 ipw_wx_set_wireless_mode,
7392 ipw_wx_get_wireless_mode,
ea2b26e0
JK
7393 ipw_wx_set_preamble,
7394 ipw_wx_get_preamble,
7395#ifdef CONFIG_IPW_MONITOR
7396 ipw_wx_set_monitor,
bf79451e 7397 ipw_wx_reset,
43f66a6c
JK
7398#endif
7399};
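/* The order of ipw_priv_handler[] must track the IPW_PRIV_* numbering above:
 * entry N serves SIOCIWFIRSTPRIV + N, while ipw_priv_args[] provides the
 * iwpriv names and argument formats for those same commands. */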
7400
0edd5b44 7401static struct iw_handler_def ipw_wx_handler_def = {
ea2b26e0
JK
7402 .standard = ipw_wx_handlers,
7403 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
7404 .num_private = ARRAY_SIZE(ipw_priv_handler),
7405 .num_private_args = ARRAY_SIZE(ipw_priv_args),
7406 .private = ipw_priv_handler,
7407 .private_args = ipw_priv_args,
43f66a6c
JK
7408};
7409
a613bffd
JK
7410static struct iw_public_data ipw_wx_data;
7411
43f66a6c
JK
7412/*
7413 * Get wireless statistics.
7414 * Called by /proc/net/wireless
7415 * Also called by SIOCGIWSTATS
7416 */
0edd5b44 7417static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
43f66a6c
JK
7418{
7419 struct ipw_priv *priv = ieee80211_priv(dev);
7420 struct iw_statistics *wstats;
bf79451e 7421
43f66a6c
JK
7422 wstats = &priv->wstats;
7423
ea2b26e0 7424 /* if hw is disabled, then ipw_get_ordinal() can't be called.
bf79451e 7425 * ipw2100_wx_wireless_stats seems to be called before fw is
43f66a6c
JK
7426 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
 7427	 * and associated; if not associated, the values are all meaningless
 7428	 * anyway, so set them all to 0 and INVALID */
7429 if (!(priv->status & STATUS_ASSOCIATED)) {
7430 wstats->miss.beacon = 0;
7431 wstats->discard.retries = 0;
7432 wstats->qual.qual = 0;
7433 wstats->qual.level = 0;
7434 wstats->qual.noise = 0;
7435 wstats->qual.updated = 7;
7436 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
0edd5b44 7437 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
43f66a6c 7438 return wstats;
bf79451e 7439 }
43f66a6c
JK
7440
7441 wstats->qual.qual = priv->quality;
7442 wstats->qual.level = average_value(&priv->average_rssi);
7443 wstats->qual.noise = average_value(&priv->average_noise);
7444 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
0edd5b44 7445 IW_QUAL_NOISE_UPDATED;
43f66a6c
JK
7446
7447 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
7448 wstats->discard.retries = priv->last_tx_failures;
7449 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
bf79451e 7450
43f66a6c
JK
7451/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
7452 goto fail_get_ordinal;
7453 wstats->discard.retries += tx_retry; */
bf79451e 7454
43f66a6c
JK
7455 return wstats;
7456}
7457
43f66a6c
JK
7458/* net device stuff */
7459
7460static inline void init_sys_config(struct ipw_sys_config *sys_config)
7461{
0edd5b44
JG
7462 memset(sys_config, 0, sizeof(struct ipw_sys_config));
7463 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
43f66a6c
JK
7464 sys_config->answer_broadcast_ssid_probe = 0;
7465 sys_config->accept_all_data_frames = 0;
7466 sys_config->accept_non_directed_frames = 1;
7467 sys_config->exclude_unicast_unencrypted = 0;
7468 sys_config->disable_unicast_decryption = 1;
7469 sys_config->exclude_multicast_unencrypted = 0;
7470 sys_config->disable_multicast_decryption = 1;
7471 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
0edd5b44 7472 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
43f66a6c 7473 sys_config->dot11g_auto_detection = 0;
bf79451e 7474 sys_config->enable_cts_to_self = 0;
43f66a6c 7475 sys_config->bt_coexist_collision_thr = 0;
a613bffd 7476	sys_config->pass_noise_stats_to_host = 0;	/* 1 -- fix for 256 */
43f66a6c
JK
7477}
7478
7479static int ipw_net_open(struct net_device *dev)
7480{
7481 struct ipw_priv *priv = ieee80211_priv(dev);
7482 IPW_DEBUG_INFO("dev->open\n");
7483 /* we should be verifying the device is ready to be opened */
bf79451e
JG
7484 if (!(priv->status & STATUS_RF_KILL_MASK) &&
7485 (priv->status & STATUS_ASSOCIATED))
43f66a6c
JK
7486 netif_start_queue(dev);
7487 return 0;
7488}
7489
7490static int ipw_net_stop(struct net_device *dev)
7491{
7492 IPW_DEBUG_INFO("dev->close\n");
7493 netif_stop_queue(dev);
7494 return 0;
7495}
7496
7497/*
7498todo:
7499
7500modify to send one tfd per fragment instead of using chunking. otherwise
7501we need to heavily modify the ieee80211_skb_to_txb.
7502*/
7503
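/* Build one TX TFD for a txb: the first NUM_TFD_CHUNKS - 2 fragments are
 * DMA-mapped as individual chunks; any remaining fragments are copied into a
 * single newly allocated skb and appended as one extra chunk before the queue
 * write pointer is advanced. */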
7504static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
7505{
0dacca1f 7506 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
0edd5b44 7507 txb->fragments[0]->data;
43f66a6c
JK
7508 int i = 0;
7509 struct tfd_frame *tfd;
7510 struct clx2_tx_queue *txq = &priv->txq[0];
7511 struct clx2_queue *q = &txq->q;
7512 u8 id, hdr_len, unicast;
7513 u16 remaining_bytes;
7514
7515 switch (priv->ieee->iw_mode) {
7516 case IW_MODE_ADHOC:
7517 hdr_len = IEEE80211_3ADDR_LEN;
7518 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
0edd5b44 7519 !is_multicast_ether_addr(hdr->addr1);
43f66a6c
JK
7520 id = ipw_find_station(priv, hdr->addr1);
7521 if (id == IPW_INVALID_STATION) {
7522 id = ipw_add_station(priv, hdr->addr1);
7523 if (id == IPW_INVALID_STATION) {
7524 IPW_WARNING("Attempt to send data to "
bf79451e 7525 "invalid cell: " MAC_FMT "\n",
43f66a6c
JK
7526 MAC_ARG(hdr->addr1));
7527 goto drop;
7528 }
7529 }
7530 break;
7531
7532 case IW_MODE_INFRA:
7533 default:
7534 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
0edd5b44 7535 !is_multicast_ether_addr(hdr->addr3);
43f66a6c
JK
7536 hdr_len = IEEE80211_3ADDR_LEN;
7537 id = 0;
7538 break;
7539 }
7540
7541 tfd = &txq->bd[q->first_empty];
7542 txq->txb[q->first_empty] = txb;
7543 memset(tfd, 0, sizeof(*tfd));
7544 tfd->u.data.station_number = id;
7545
7546 tfd->control_flags.message_type = TX_FRAME_TYPE;
7547 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
7548
7549 tfd->u.data.cmd_id = DINO_CMD_TX;
a613bffd 7550 tfd->u.data.len = cpu_to_le16(txb->payload_size);
43f66a6c
JK
7551 remaining_bytes = txb->payload_size;
7552 if (unlikely(!unicast))
7553 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
7554 else
7555 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
bf79451e 7556
43f66a6c
JK
7557 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
7558 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
7559 else
7560 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
7561
ea2b26e0
JK
7562 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
7563 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
43f66a6c
JK
7564
7565 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
7566
7567 /* payload */
a613bffd
JK
7568 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
7569 txb->nr_frags));
7570 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
7571 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
7572 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
7573 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
7574 i, le32_to_cpu(tfd->u.data.num_chunks),
7575 txb->fragments[i]->len - hdr_len);
bf79451e 7576 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
43f66a6c
JK
7577 i, tfd->u.data.num_chunks,
7578 txb->fragments[i]->len - hdr_len);
bf79451e 7579 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
43f66a6c
JK
7580 txb->fragments[i]->len - hdr_len);
7581
0edd5b44 7582 tfd->u.data.chunk_ptr[i] =
a613bffd
JK
7583 cpu_to_le32(pci_map_single
7584 (priv->pci_dev,
7585 txb->fragments[i]->data + hdr_len,
7586 txb->fragments[i]->len - hdr_len,
7587 PCI_DMA_TODEVICE));
7588 tfd->u.data.chunk_len[i] =
7589 cpu_to_le16(txb->fragments[i]->len - hdr_len);
43f66a6c
JK
7590 }
7591
7592 if (i != txb->nr_frags) {
7593 struct sk_buff *skb;
7594 u16 remaining_bytes = 0;
7595 int j;
7596
7597 for (j = i; j < txb->nr_frags; j++)
7598 remaining_bytes += txb->fragments[j]->len - hdr_len;
7599
7600 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
7601 remaining_bytes);
7602 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
7603 if (skb != NULL) {
a613bffd 7604 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
43f66a6c
JK
7605 for (j = i; j < txb->nr_frags; j++) {
7606 int size = txb->fragments[j]->len - hdr_len;
7607 printk(KERN_INFO "Adding frag %d %d...\n",
0edd5b44 7608 j, size);
43f66a6c 7609 memcpy(skb_put(skb, size),
0edd5b44 7610 txb->fragments[j]->data + hdr_len, size);
43f66a6c
JK
7611 }
7612 dev_kfree_skb_any(txb->fragments[i]);
7613 txb->fragments[i] = skb;
0edd5b44 7614 tfd->u.data.chunk_ptr[i] =
a613bffd
JK
7615 cpu_to_le32(pci_map_single
7616 (priv->pci_dev, skb->data,
7617 tfd->u.data.chunk_len[i],
7618 PCI_DMA_TODEVICE));
7619
7620 tfd->u.data.num_chunks =
7621 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
7622 1);
bf79451e 7623 }
43f66a6c
JK
7624 }
7625
7626 /* kick DMA */
7627 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
7628 ipw_write32(priv, q->reg_w, q->first_empty);
7629
bf79451e 7630 if (ipw_queue_space(q) < q->high_mark)
43f66a6c
JK
7631 netif_stop_queue(priv->net_dev);
7632
7633 return;
7634
0edd5b44 7635 drop:
43f66a6c
JK
7636 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
7637 ieee80211_txb_free(txb);
7638}
7639
7640static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
c8d42d1a 7641 struct net_device *dev, int pri)
43f66a6c
JK
7642{
7643 struct ipw_priv *priv = ieee80211_priv(dev);
7644 unsigned long flags;
7645
7646 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
7647
7648 spin_lock_irqsave(&priv->lock, flags);
7649
7650 if (!(priv->status & STATUS_ASSOCIATED)) {
7651 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
7652 priv->ieee->stats.tx_carrier_errors++;
7653 netif_stop_queue(dev);
7654 goto fail_unlock;
7655 }
7656
7657 ipw_tx_skb(priv, txb);
a613bffd 7658 ipw_led_activity_on(priv);
43f66a6c
JK
7659
7660 spin_unlock_irqrestore(&priv->lock, flags);
7661 return 0;
7662
0edd5b44 7663 fail_unlock:
43f66a6c
JK
7664 spin_unlock_irqrestore(&priv->lock, flags);
7665 return 1;
7666}
7667
7668static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
7669{
7670 struct ipw_priv *priv = ieee80211_priv(dev);
bf79451e 7671
43f66a6c
JK
7672 priv->ieee->stats.tx_packets = priv->tx_packets;
7673 priv->ieee->stats.rx_packets = priv->rx_packets;
7674 return &priv->ieee->stats;
7675}
7676
7677static void ipw_net_set_multicast_list(struct net_device *dev)
7678{
7679
7680}
7681
7682static int ipw_net_set_mac_address(struct net_device *dev, void *p)
7683{
7684 struct ipw_priv *priv = ieee80211_priv(dev);
7685 struct sockaddr *addr = p;
7686 if (!is_valid_ether_addr(addr->sa_data))
7687 return -EADDRNOTAVAIL;
7688 priv->config |= CFG_CUSTOM_MAC;
7689 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
7690 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
7691 priv->net_dev->name, MAC_ARG(priv->mac_addr));
a613bffd 7692 queue_work(priv->workqueue, &priv->adapter_restart);
43f66a6c
JK
7693 return 0;
7694}
7695
bf79451e 7696static void ipw_ethtool_get_drvinfo(struct net_device *dev,
43f66a6c
JK
7697 struct ethtool_drvinfo *info)
7698{
7699 struct ipw_priv *p = ieee80211_priv(dev);
7700 char vers[64];
7701 char date[32];
7702 u32 len;
7703
7704 strcpy(info->driver, DRV_NAME);
7705 strcpy(info->version, DRV_VERSION);
7706
7707 len = sizeof(vers);
7708 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
7709 len = sizeof(date);
7710 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
7711
0edd5b44 7712 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
43f66a6c
JK
7713 vers, date);
7714 strcpy(info->bus_info, pci_name(p->pci_dev));
7715 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
7716}
7717
7718static u32 ipw_ethtool_get_link(struct net_device *dev)
7719{
7720 struct ipw_priv *priv = ieee80211_priv(dev);
7721 return (priv->status & STATUS_ASSOCIATED) != 0;
7722}
7723
7724static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
7725{
7726 return CX2_EEPROM_IMAGE_SIZE;
7727}
7728
7729static int ipw_ethtool_get_eeprom(struct net_device *dev,
0edd5b44 7730 struct ethtool_eeprom *eeprom, u8 * bytes)
43f66a6c
JK
7731{
7732 struct ipw_priv *p = ieee80211_priv(dev);
7733
7734 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
7735 return -EINVAL;
bf79451e 7736
0edd5b44 7737 memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
43f66a6c
JK
7738 return 0;
7739}
7740
7741static int ipw_ethtool_set_eeprom(struct net_device *dev,
0edd5b44 7742 struct ethtool_eeprom *eeprom, u8 * bytes)
43f66a6c
JK
7743{
7744 struct ipw_priv *p = ieee80211_priv(dev);
7745 int i;
7746
7747 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
7748 return -EINVAL;
7749
0edd5b44 7750 memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
bf79451e 7751 for (i = IPW_EEPROM_DATA;
0edd5b44 7752 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
43f66a6c
JK
7753 ipw_write8(p, i, p->eeprom[i]);
7754
7755 return 0;
7756}
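/* Both ethtool EEPROM accessors work on the image cached in priv->eeprom;
 * set_eeprom also writes the image back to the device through the
 * IPW_EEPROM_DATA area. */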
7757
7758static struct ethtool_ops ipw_ethtool_ops = {
ea2b26e0
JK
7759 .get_link = ipw_ethtool_get_link,
7760 .get_drvinfo = ipw_ethtool_get_drvinfo,
7761 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
7762 .get_eeprom = ipw_ethtool_get_eeprom,
7763 .set_eeprom = ipw_ethtool_set_eeprom,
43f66a6c
JK
7764};
7765
7766static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
7767{
7768 struct ipw_priv *priv = data;
7769 u32 inta, inta_mask;
bf79451e 7770
43f66a6c
JK
7771 if (!priv)
7772 return IRQ_NONE;
7773
7774 spin_lock(&priv->lock);
7775
7776 if (!(priv->status & STATUS_INT_ENABLED)) {
7777 /* Shared IRQ */
7778 goto none;
7779 }
7780
7781 inta = ipw_read32(priv, CX2_INTA_RW);
7782 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
bf79451e 7783
43f66a6c
JK
7784 if (inta == 0xFFFFFFFF) {
7785 /* Hardware disappeared */
7786 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
7787 goto none;
7788 }
7789
7790 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
7791 /* Shared interrupt */
7792 goto none;
7793 }
7794
7795 /* tell the device to stop sending interrupts */
7796 ipw_disable_interrupts(priv);
bf79451e 7797
43f66a6c
JK
7798 /* ack current interrupts */
7799 inta &= (CX2_INTA_MASK_ALL & inta_mask);
7800 ipw_write32(priv, CX2_INTA_RW, inta);
bf79451e 7801
43f66a6c
JK
7802 /* Cache INTA value for our tasklet */
7803 priv->isr_inta = inta;
7804
7805 tasklet_schedule(&priv->irq_tasklet);
7806
0edd5b44 7807 spin_unlock(&priv->lock);
43f66a6c
JK
7808
7809 return IRQ_HANDLED;
0edd5b44 7810 none:
43f66a6c
JK
7811 spin_unlock(&priv->lock);
7812 return IRQ_NONE;
7813}
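/* The hard IRQ handler only masks and acks the interrupt and caches the INTA
 * bits in priv->isr_inta; all further processing is deferred to
 * ipw_irq_tasklet via tasklet_schedule(). */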
7814
7815static void ipw_rf_kill(void *adapter)
7816{
7817 struct ipw_priv *priv = adapter;
7818 unsigned long flags;
bf79451e 7819
43f66a6c
JK
7820 spin_lock_irqsave(&priv->lock, flags);
7821
7822 if (rf_kill_active(priv)) {
7823 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
7824 if (priv->workqueue)
7825 queue_delayed_work(priv->workqueue,
7826 &priv->rf_kill, 2 * HZ);
7827 goto exit_unlock;
7828 }
7829
7830 /* RF Kill is now disabled, so bring the device back up */
7831
7832 if (!(priv->status & STATUS_RF_KILL_MASK)) {
7833 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
7834 "device\n");
7835
7836 /* we can not do an adapter restart while inside an irq lock */
7837 queue_work(priv->workqueue, &priv->adapter_restart);
bf79451e 7838 } else
43f66a6c
JK
7839 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
7840 "enabled\n");
7841
0edd5b44 7842 exit_unlock:
43f66a6c
JK
7843 spin_unlock_irqrestore(&priv->lock, flags);
7844}
7845
a613bffd
JK
7846void ipw_link_up(struct ipw_priv *priv)
7847{
7848 netif_carrier_on(priv->net_dev);
7849 if (netif_queue_stopped(priv->net_dev)) {
7850 IPW_DEBUG_NOTIF("waking queue\n");
7851 netif_wake_queue(priv->net_dev);
7852 } else {
7853 IPW_DEBUG_NOTIF("starting queue\n");
7854 netif_start_queue(priv->net_dev);
7855 }
7856
7857 ipw_reset_stats(priv);
7858 /* Ensure the rate is updated immediately */
7859 priv->last_rate = ipw_get_current_rate(priv);
7860 ipw_gather_stats(priv);
7861 ipw_led_link_up(priv);
7862 notify_wx_assoc_event(priv);
7863
7864 if (priv->config & CFG_BACKGROUND_SCAN)
7865 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
7866}
7867
7868void ipw_link_down(struct ipw_priv *priv)
7869{
7870 ipw_led_link_down(priv);
7871 netif_carrier_off(priv->net_dev);
7872 netif_stop_queue(priv->net_dev);
7873 notify_wx_assoc_event(priv);
7874
7875 /* Cancel any queued work ... */
7876 cancel_delayed_work(&priv->request_scan);
7877 cancel_delayed_work(&priv->adhoc_check);
7878 cancel_delayed_work(&priv->gather_stats);
7879
7880 ipw_reset_stats(priv);
7881
7882 /* Queue up another scan... */
7883 queue_work(priv->workqueue, &priv->request_scan);
7884}
7885
43f66a6c
JK
7886static int ipw_setup_deferred_work(struct ipw_priv *priv)
7887{
7888 int ret = 0;
7889
43f66a6c 7890 priv->workqueue = create_workqueue(DRV_NAME);
43f66a6c
JK
7891 init_waitqueue_head(&priv->wait_command_queue);
7892
7893 INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
7894 INIT_WORK(&priv->associate, ipw_associate, priv);
7895 INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
7896 INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
7897 INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
7898 INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
7899 INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
7900 INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
bf79451e 7901 INIT_WORK(&priv->request_scan,
43f66a6c 7902 (void (*)(void *))ipw_request_scan, priv);
bf79451e 7903 INIT_WORK(&priv->gather_stats,
43f66a6c
JK
7904 (void (*)(void *))ipw_gather_stats, priv);
7905 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
7906 INIT_WORK(&priv->roam, ipw_roam, priv);
7907 INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
a613bffd
JK
7908 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_link_up, priv);
7909 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_link_down, priv);
7910 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_led_link_on, priv);
7911 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_led_link_off,
7912 priv);
7913 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_led_activity_off,
7914 priv);
43f66a6c
JK
7915
7916 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
7917 ipw_irq_tasklet, (unsigned long)priv);
7918
7919 return ret;
7920}
7921
43f66a6c
JK
7922static void shim__set_security(struct net_device *dev,
7923 struct ieee80211_security *sec)
7924{
7925 struct ipw_priv *priv = ieee80211_priv(dev);
7926 int i;
7927
bf79451e 7928 for (i = 0; i < 4; i++) {
43f66a6c
JK
7929 if (sec->flags & (1 << i)) {
7930 priv->sec.key_sizes[i] = sec->key_sizes[i];
7931 if (sec->key_sizes[i] == 0)
7932 priv->sec.flags &= ~(1 << i);
7933 else
bf79451e 7934 memcpy(priv->sec.keys[i], sec->keys[i],
43f66a6c
JK
7935 sec->key_sizes[i]);
7936 priv->sec.flags |= (1 << i);
7937 priv->status |= STATUS_SECURITY_UPDATED;
bf79451e 7938 }
43f66a6c
JK
7939 }
7940
7941 if ((sec->flags & SEC_ACTIVE_KEY) &&
7942 priv->sec.active_key != sec->active_key) {
7943 if (sec->active_key <= 3) {
7944 priv->sec.active_key = sec->active_key;
7945 priv->sec.flags |= SEC_ACTIVE_KEY;
bf79451e 7946 } else
43f66a6c
JK
7947 priv->sec.flags &= ~SEC_ACTIVE_KEY;
7948 priv->status |= STATUS_SECURITY_UPDATED;
7949 }
7950
7951 if ((sec->flags & SEC_AUTH_MODE) &&
7952 (priv->sec.auth_mode != sec->auth_mode)) {
7953 priv->sec.auth_mode = sec->auth_mode;
7954 priv->sec.flags |= SEC_AUTH_MODE;
7955 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
7956 priv->capability |= CAP_SHARED_KEY;
7957 else
7958 priv->capability &= ~CAP_SHARED_KEY;
7959 priv->status |= STATUS_SECURITY_UPDATED;
7960 }
bf79451e 7961
0edd5b44 7962 if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
43f66a6c
JK
7963 priv->sec.flags |= SEC_ENABLED;
7964 priv->sec.enabled = sec->enabled;
7965 priv->status |= STATUS_SECURITY_UPDATED;
bf79451e 7966 if (sec->enabled)
43f66a6c
JK
7967 priv->capability |= CAP_PRIVACY_ON;
7968 else
7969 priv->capability &= ~CAP_PRIVACY_ON;
7970 }
bf79451e 7971
0edd5b44 7972 if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
43f66a6c
JK
7973 priv->sec.level = sec->level;
7974 priv->sec.flags |= SEC_LEVEL;
7975 priv->status |= STATUS_SECURITY_UPDATED;
7976 }
7977
bf79451e
JG
7978 /* To match current functionality of ipw2100 (which works well w/
 7979	 * various supplicants), we don't force a disassociate if the
43f66a6c
JK
7980 * privacy capability changes ... */
7981#if 0
7982 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
bf79451e 7983 (((priv->assoc_request.capability &
43f66a6c 7984 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
bf79451e 7985 (!(priv->assoc_request.capability &
0edd5b44 7986 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
43f66a6c
JK
7987 IPW_DEBUG_ASSOC("Disassociating due to capability "
7988 "change.\n");
7989 ipw_disassociate(priv);
7990 }
7991#endif
7992}
7993
bf79451e 7994static int init_supported_rates(struct ipw_priv *priv,
43f66a6c
JK
7995 struct ipw_supported_rates *rates)
7996{
7997 /* TODO: Mask out rates based on priv->rates_mask */
7998
7999 memset(rates, 0, sizeof(*rates));
0edd5b44 8000 /* configure supported rates */
43f66a6c
JK
8001 switch (priv->ieee->freq_band) {
8002 case IEEE80211_52GHZ_BAND:
8003 rates->ieee_mode = IPW_A_MODE;
8004 rates->purpose = IPW_RATE_CAPABILITIES;
8005 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
8006 IEEE80211_OFDM_DEFAULT_RATES_MASK);
8007 break;
8008
0edd5b44 8009	default:		/* Mixed or 2.4GHz */
43f66a6c
JK
8010 rates->ieee_mode = IPW_G_MODE;
8011 rates->purpose = IPW_RATE_CAPABILITIES;
8012 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
8013 IEEE80211_CCK_DEFAULT_RATES_MASK);
8014 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
8015 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
8016 IEEE80211_OFDM_DEFAULT_RATES_MASK);
8017 }
8018 break;
8019 }
8020
8021 return 0;
8022}
8023
bf79451e 8024static int ipw_config(struct ipw_priv *priv)
43f66a6c
JK
8025{
8026 int i;
8027 struct ipw_tx_power tx_power;
8028
8029 memset(&priv->sys_config, 0, sizeof(priv->sys_config));
8030 memset(&tx_power, 0, sizeof(tx_power));
8031
8032 /* This is only called from ipw_up, which resets/reloads the firmware
 8033	   so we don't need to first disable the card before we configure
8034 it */
8035
8036 /* configure device for 'G' band */
8037 tx_power.ieee_mode = IPW_G_MODE;
8038 tx_power.num_channels = 11;
8039 for (i = 0; i < 11; i++) {
8040 tx_power.channels_tx_power[i].channel_number = i + 1;
8041 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
8042 }
8043 if (ipw_send_tx_power(priv, &tx_power))
8044 goto error;
8045
8046 /* configure device to also handle 'B' band */
8047 tx_power.ieee_mode = IPW_B_MODE;
8048 if (ipw_send_tx_power(priv, &tx_power))
8049 goto error;
8050
8051 /* initialize adapter address */
8052 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
8053 goto error;
8054
8055 /* set basic system config settings */
8056 init_sys_config(&priv->sys_config);
8057 if (ipw_send_system_config(priv, &priv->sys_config))
8058 goto error;
8059
0edd5b44
JG
8060 init_supported_rates(priv, &priv->rates);
8061 if (ipw_send_supported_rates(priv, &priv->rates))
43f66a6c
JK
8062 goto error;
8063
8064 /* Set request-to-send threshold */
8065 if (priv->rts_threshold) {
8066 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
8067 goto error;
8068 }
8069
8070 if (ipw_set_random_seed(priv))
8071 goto error;
bf79451e 8072
43f66a6c
JK
8073 /* final state transition to the RUN state */
8074 if (ipw_send_host_complete(priv))
8075 goto error;
8076
8077 /* If configured to try and auto-associate, kick off a scan */
8078 if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
8079 goto error;
8080
8081 return 0;
bf79451e 8082
0edd5b44 8083 error:
43f66a6c
JK
8084 return -EIO;
8085}
8086
8087#define MAX_HW_RESTARTS 5
8088static int ipw_up(struct ipw_priv *priv)
8089{
8090 int rc, i;
8091
8092 if (priv->status & STATUS_EXIT_PENDING)
8093 return -EIO;
8094
0edd5b44 8095 for (i = 0; i < MAX_HW_RESTARTS; i++) {
bf79451e 8096 /* Load the microcode, firmware, and eeprom.
43f66a6c
JK
8097 * Also start the clocks. */
8098 rc = ipw_load(priv);
8099 if (rc) {
0edd5b44 8100 IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);
43f66a6c
JK
8101 return rc;
8102 }
8103
8104 ipw_init_ordinals(priv);
8105 if (!(priv->config & CFG_CUSTOM_MAC))
8106 eeprom_parse_mac(priv, priv->mac_addr);
8107 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
8108
8109 if (priv->status & STATUS_RF_KILL_MASK)
8110 return 0;
8111
8112 rc = ipw_config(priv);
8113 if (!rc) {
8114 IPW_DEBUG_INFO("Configured device on count %i\n", i);
a613bffd
JK
8115 ipw_led_init(priv);
8116 ipw_led_radio_on(priv);
43f66a6c 8117 priv->notif_missed_beacons = 0;
43f66a6c
JK
8118 return 0;
8119 } else {
8120 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
8121 rc);
8122 }
bf79451e 8123
43f66a6c
JK
8124 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
8125 i, MAX_HW_RESTARTS);
8126
8127 /* We had an error bringing up the hardware, so take it
8128 * all the way back down so we can try again */
8129 ipw_down(priv);
8130 }
8131
bf79451e 8132 /* tried to restart and config the device for as long as our
43f66a6c 8133 * patience could withstand */
0edd5b44 8134 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
43f66a6c
JK
8135 return -EIO;
8136}
8137
8138static void ipw_down(struct ipw_priv *priv)
8139{
8140 /* Attempt to disable the card */
8141#if 0
8142 ipw_send_card_disable(priv, 0);
8143#endif
8144
8145 /* tell the device to stop sending interrupts */
8146 ipw_disable_interrupts(priv);
8147
8148 /* Clear all bits but the RF Kill */
8149 priv->status &= STATUS_RF_KILL_MASK;
8150
8151 netif_carrier_off(priv->net_dev);
8152 netif_stop_queue(priv->net_dev);
8153
8154 ipw_stop_nic(priv);
a613bffd
JK
8155
8156 ipw_led_radio_off(priv);
43f66a6c
JK
8157}
8158
ea2b26e0
JK
8159static int ipw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
8160{
ea2b26e0
JK
8161 struct iwreq *wrq = (struct iwreq *)rq;
8162 int ret = -1;
8163 switch (cmd) {
8164 case IPW_IOCTL_WPA_SUPPLICANT:
8165 ret = ipw_wpa_supplicant(dev, &wrq->u.data);
8166 return ret;
8167
8168 default:
8169 return -EOPNOTSUPP;
8170 }
8171
ea2b26e0
JK
8172 return -EOPNOTSUPP;
8173}
8174
43f66a6c
JK
8175/* Called by register_netdev() */
8176static int ipw_net_init(struct net_device *dev)
8177{
8178 struct ipw_priv *priv = ieee80211_priv(dev);
8179
8180 if (priv->status & STATUS_RF_KILL_SW) {
8181 IPW_WARNING("Radio disabled by module parameter.\n");
8182 return 0;
8183 } else if (rf_kill_active(priv)) {
8184 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
8185 "Kill switch must be turned off for "
8186 "wireless networking to work.\n");
8187 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
8188 return 0;
8189 }
8190
8191 if (ipw_up(priv))
8192 return -EIO;
8193
8194 return 0;
8195}
8196
8197/* PCI driver stuff */
8198static struct pci_device_id card_ids[] = {
8199 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
8200 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
8201 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
8202 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
8203 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
8204 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
8205 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
8206 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
8207 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
8208 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
8209 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
8210 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
8211 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
8212 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
8213 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
8214 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
8215 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
8216 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
0edd5b44 8217 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
a613bffd 8218 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
0edd5b44
JG
8219 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
8220 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
bf79451e 8221
43f66a6c
JK
8222 /* required last entry */
8223 {0,}
8224};
8225
8226MODULE_DEVICE_TABLE(pci, card_ids);
8227
8228static struct attribute *ipw_sysfs_entries[] = {
8229 &dev_attr_rf_kill.attr,
8230 &dev_attr_direct_dword.attr,
8231 &dev_attr_indirect_byte.attr,
8232 &dev_attr_indirect_dword.attr,
8233 &dev_attr_mem_gpio_reg.attr,
8234 &dev_attr_command_event_reg.attr,
8235 &dev_attr_nic_type.attr,
8236 &dev_attr_status.attr,
8237 &dev_attr_cfg.attr,
8238 &dev_attr_dump_errors.attr,
8239 &dev_attr_dump_events.attr,
8240 &dev_attr_eeprom_delay.attr,
8241 &dev_attr_ucode_version.attr,
8242 &dev_attr_rtc.attr,
a613bffd
JK
8243 &dev_attr_scan_age.attr,
8244 &dev_attr_led.attr,
43f66a6c
JK
8245 NULL
8246};
8247
8248static struct attribute_group ipw_attribute_group = {
8249 .name = NULL, /* put in device directory */
0edd5b44 8250 .attrs = ipw_sysfs_entries,
43f66a6c
JK
8251};
8252
0edd5b44 8253static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
43f66a6c
JK
8254{
8255 int err = 0;
8256 struct net_device *net_dev;
8257 void __iomem *base;
8258 u32 length, val;
8259 struct ipw_priv *priv;
8260 int band, modulation;
8261
8262 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
8263 if (net_dev == NULL) {
8264 err = -ENOMEM;
8265 goto out;
8266 }
8267
8268 priv = ieee80211_priv(net_dev);
8269 priv->ieee = netdev_priv(net_dev);
a613bffd 8270
43f66a6c
JK
8271 priv->net_dev = net_dev;
8272 priv->pci_dev = pdev;
8273#ifdef CONFIG_IPW_DEBUG
8274 ipw_debug_level = debug;
8275#endif
8276 spin_lock_init(&priv->lock);
8277
8278 if (pci_enable_device(pdev)) {
8279 err = -ENODEV;
8280 goto out_free_ieee80211;
8281 }
8282
8283 pci_set_master(pdev);
8284
0e08b44e 8285 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
bf79451e 8286 if (!err)
0e08b44e 8287 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
43f66a6c
JK
8288 if (err) {
8289 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
8290 goto out_pci_disable_device;
8291 }
8292
8293 pci_set_drvdata(pdev, priv);
8294
8295 err = pci_request_regions(pdev, DRV_NAME);
bf79451e 8296 if (err)
43f66a6c
JK
8297 goto out_pci_disable_device;
8298
bf79451e 8299 /* We disable the RETRY_TIMEOUT register (0x41) to keep
43f66a6c 8300 * PCI Tx retries from interfering with C3 CPU state */
bf79451e
JG
8301 pci_read_config_dword(pdev, 0x40, &val);
8302 if ((val & 0x0000ff00) != 0)
43f66a6c 8303 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
bf79451e 8304
43f66a6c
JK
8305 length = pci_resource_len(pdev, 0);
8306 priv->hw_len = length;
bf79451e 8307
43f66a6c
JK
8308 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
8309 if (!base) {
8310 err = -ENODEV;
8311 goto out_pci_release_regions;
8312 }
8313
8314 priv->hw_base = base;
8315 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
8316 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
8317
8318 err = ipw_setup_deferred_work(priv);
8319 if (err) {
8320 IPW_ERROR("Unable to setup deferred work\n");
8321 goto out_iounmap;
8322 }
8323
8324 /* Initialize module parameter values here */
a613bffd
JK
8325
8326 /* We default to disabling the LED code as right now it causes
8327 * too many systems to lock up... */
8328 if (!led)
8329 priv->config |= CFG_NO_LED;
8330
bf79451e 8331 if (associate)
43f66a6c
JK
8332 priv->config |= CFG_ASSOCIATE;
8333 else
8334 IPW_DEBUG_INFO("Auto associate disabled.\n");
bf79451e
JG
8335
8336 if (auto_create)
43f66a6c
JK
8337 priv->config |= CFG_ADHOC_CREATE;
8338 else
8339 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
bf79451e 8340
43f66a6c
JK
8341 if (disable) {
8342 priv->status |= STATUS_RF_KILL_SW;
8343 IPW_DEBUG_INFO("Radio disabled.\n");
8344 }
8345
8346 if (channel != 0) {
8347 priv->config |= CFG_STATIC_CHANNEL;
8348 priv->channel = channel;
8349 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
43f66a6c
JK
8351 /* TODO: Validate that provided channel is in range */
8352 }
8353
8354 switch (mode) {
8355 case 1:
8356 priv->ieee->iw_mode = IW_MODE_ADHOC;
8357 break;
ea2b26e0 8358#ifdef CONFIG_IPW_MONITOR
43f66a6c
JK
8359 case 2:
8360 priv->ieee->iw_mode = IW_MODE_MONITOR;
8361 break;
8362#endif
8363 default:
8364 case 0:
8365 priv->ieee->iw_mode = IW_MODE_INFRA;
8366 break;
8367 }
8368
8369 if ((priv->pci_dev->device == 0x4223) ||
8370 (priv->pci_dev->device == 0x4224)) {
bf79451e 8371 printk(KERN_INFO DRV_NAME
43f66a6c
JK
8372 ": Detected Intel PRO/Wireless 2915ABG Network "
8373 "Connection\n");
a33a1982 8374 priv->ieee->abg_true = 1;
43f66a6c
JK
8375 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8376 modulation = IEEE80211_OFDM_MODULATION |
0edd5b44 8377 IEEE80211_CCK_MODULATION;
43f66a6c 8378 priv->adapter = IPW_2915ABG;
0edd5b44 8379 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
43f66a6c 8380 } else {
a613bffd
JK
8381 printk(KERN_INFO DRV_NAME
8382 ": Detected Intel PRO/Wireless 2200BG Network "
8383 "Connection\n");
bf79451e 8384
a33a1982 8385 priv->ieee->abg_true = 0;
43f66a6c
JK
8386 band = IEEE80211_24GHZ_BAND;
8387 modulation = IEEE80211_OFDM_MODULATION |
0edd5b44 8388 IEEE80211_CCK_MODULATION;
43f66a6c 8389 priv->adapter = IPW_2200BG;
0edd5b44 8390 priv->ieee->mode = IEEE_G | IEEE_B;
43f66a6c
JK
8391 }
8392
8393 priv->ieee->freq_band = band;
8394 priv->ieee->modulation = modulation;
8395
8396 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8397
8398 priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8399 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8400
8401 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8402
8403 /* If power management is turned on, default to AC mode */
0edd5b44 8404 priv->power_mode = IPW_POWER_AC;
43f66a6c
JK
8405 priv->tx_power = IPW_DEFAULT_TX_POWER;
8406
0edd5b44 8407 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
43f66a6c
JK
8408 if (err) {
8409 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
8410 goto out_destroy_workqueue;
8411 }
8412
8413 SET_MODULE_OWNER(net_dev);
8414 SET_NETDEV_DEV(net_dev, &pdev->dev);
8415
a613bffd
JK
8416 ipw_wx_data.spy_data = &priv->ieee->spy_data;
8417 ipw_wx_data.ieee80211 = priv->ieee;
8418
43f66a6c
JK
8419 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
8420 priv->ieee->set_security = shim__set_security;
8421
8422 net_dev->open = ipw_net_open;
8423 net_dev->stop = ipw_net_stop;
8424 net_dev->init = ipw_net_init;
ea2b26e0 8425 net_dev->do_ioctl = ipw_ioctl;
43f66a6c
JK
8426 net_dev->get_stats = ipw_net_get_stats;
8427 net_dev->set_multicast_list = ipw_net_set_multicast_list;
8428 net_dev->set_mac_address = ipw_net_set_mac_address;
8429 net_dev->get_wireless_stats = ipw_get_wireless_stats;
a613bffd 8430 net_dev->wireless_data = &ipw_wx_data;
43f66a6c
JK
8431 net_dev->wireless_handlers = &ipw_wx_handler_def;
8432 net_dev->ethtool_ops = &ipw_ethtool_ops;
8433 net_dev->irq = pdev->irq;
0edd5b44 8434 net_dev->base_addr = (unsigned long)priv->hw_base;
43f66a6c
JK
8435 net_dev->mem_start = pci_resource_start(pdev, 0);
8436 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
8437
8438 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
8439 if (err) {
8440 IPW_ERROR("failed to create sysfs device attributes\n");
8441 goto out_release_irq;
8442 }
8443
8444 err = register_netdev(net_dev);
8445 if (err) {
8446 IPW_ERROR("failed to register network device\n");
a613bffd 8447 goto out_remove_sysfs;
43f66a6c
JK
8448 }
8449
8450 return 0;
8451
a613bffd 8452 out_remove_sysfs:
43f66a6c 8453 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
0edd5b44 8454 out_release_irq:
43f66a6c 8455 free_irq(pdev->irq, priv);
0edd5b44 8456 out_destroy_workqueue:
43f66a6c
JK
8457 destroy_workqueue(priv->workqueue);
8458 priv->workqueue = NULL;
0edd5b44 8459 out_iounmap:
43f66a6c 8460 iounmap(priv->hw_base);
0edd5b44 8461 out_pci_release_regions:
43f66a6c 8462 pci_release_regions(pdev);
0edd5b44 8463 out_pci_disable_device:
43f66a6c
JK
8464 pci_disable_device(pdev);
8465 pci_set_drvdata(pdev, NULL);
0edd5b44 8466 out_free_ieee80211:
43f66a6c 8467 free_ieee80211(priv->net_dev);
0edd5b44 8468 out:
43f66a6c
JK
8469 return err;
8470}
8471
8472static void ipw_pci_remove(struct pci_dev *pdev)
8473{
8474 struct ipw_priv *priv = pci_get_drvdata(pdev);
8475 if (!priv)
8476 return;
8477
8478 priv->status |= STATUS_EXIT_PENDING;
8479
8480 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
8481
8482 ipw_down(priv);
8483
8484 unregister_netdev(priv->net_dev);
8485
8486 if (priv->rxq) {
8487 ipw_rx_queue_free(priv, priv->rxq);
8488 priv->rxq = NULL;
8489 }
8490 ipw_tx_queue_free(priv);
8491
a613bffd
JK
8492 ipw_led_shutdown(priv);
8493
43f66a6c
JK
8494 /* ipw_down will ensure that there is no more pending work
8495 * in the workqueue's, so we can safely remove them now. */
a613bffd
JK
8496 cancel_delayed_work(&priv->adhoc_check);
8497 cancel_delayed_work(&priv->gather_stats);
8498 cancel_delayed_work(&priv->request_scan);
8499 cancel_delayed_work(&priv->rf_kill);
8500 cancel_delayed_work(&priv->scan_check);
8501 destroy_workqueue(priv->workqueue);
8502 priv->workqueue = NULL;
43f66a6c
JK
8503
8504 free_irq(pdev->irq, priv);
8505 iounmap(priv->hw_base);
8506 pci_release_regions(pdev);
8507 pci_disable_device(pdev);
8508 pci_set_drvdata(pdev, NULL);
8509 free_ieee80211(priv->net_dev);
8510
8511#ifdef CONFIG_PM
8512 if (fw_loaded) {
8513 release_firmware(bootfw);
8514 release_firmware(ucode);
8515 release_firmware(firmware);
8516 fw_loaded = 0;
8517 }
8518#endif
8519}
8520
43f66a6c 8521#ifdef CONFIG_PM
583a4e88 8522static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
43f66a6c
JK
8523{
8524 struct ipw_priv *priv = pci_get_drvdata(pdev);
8525 struct net_device *dev = priv->net_dev;
8526
8527 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
8528
0edd5b44 8529 /* Take down the device; powers it off, etc. */
43f66a6c
JK
8530 ipw_down(priv);
8531
8532 /* Remove the PRESENT state of the device */
8533 netif_device_detach(dev);
8534
43f66a6c 8535 pci_save_state(pdev);
43f66a6c 8536 pci_disable_device(pdev);
583a4e88 8537 pci_set_power_state(pdev, pci_choose_state(pdev, state));
bf79451e 8538
43f66a6c
JK
8539 return 0;
8540}
8541
8542static int ipw_pci_resume(struct pci_dev *pdev)
8543{
8544 struct ipw_priv *priv = pci_get_drvdata(pdev);
8545 struct net_device *dev = priv->net_dev;
8546 u32 val;
bf79451e 8547
43f66a6c
JK
8548 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
8549
ea2b26e0 8550 pci_set_power_state(pdev, PCI_D0);
43f66a6c 8551 pci_enable_device(pdev);
43f66a6c 8552 pci_restore_state(pdev);
ea2b26e0 8553
43f66a6c
JK
8554 /*
8555 * Suspend/Resume resets the PCI configuration space, so we have to
8556 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
8557 * from interfering with C3 CPU state. pci_restore_state won't help
8558 * here since it only restores the first 64 bytes pci config header.
8559 */
bf79451e
JG
8560 pci_read_config_dword(pdev, 0x40, &val);
8561 if ((val & 0x0000ff00) != 0)
43f66a6c
JK
8562 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
8563
8564 /* Set the device back into the PRESENT state; this will also wake
 8565	 * the queue if needed */
8566 netif_device_attach(dev);
8567
8568 /* Bring the device back up */
8569 queue_work(priv->workqueue, &priv->up);
bf79451e 8570
43f66a6c
JK
8571 return 0;
8572}
8573#endif
8574
8575/* driver initialization stuff */
8576static struct pci_driver ipw_driver = {
8577 .name = DRV_NAME,
8578 .id_table = card_ids,
8579 .probe = ipw_pci_probe,
8580 .remove = __devexit_p(ipw_pci_remove),
8581#ifdef CONFIG_PM
8582 .suspend = ipw_pci_suspend,
8583 .resume = ipw_pci_resume,
8584#endif
8585};
8586
8587static int __init ipw_init(void)
8588{
8589 int ret;
8590
8591 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
8592 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
8593
8594 ret = pci_module_init(&ipw_driver);
8595 if (ret) {
8596 IPW_ERROR("Unable to initialize PCI module\n");
8597 return ret;
8598 }
8599
0edd5b44 8600 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
43f66a6c
JK
8601 if (ret) {
8602 IPW_ERROR("Unable to create driver sysfs file\n");
8603 pci_unregister_driver(&ipw_driver);
8604 return ret;
8605 }
8606
8607 return ret;
8608}
8609
8610static void __exit ipw_exit(void)
8611{
8612 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
8613 pci_unregister_driver(&ipw_driver);
8614}
8615
8616module_param(disable, int, 0444);
8617MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
8618
8619module_param(associate, int, 0444);
8620MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
8621
8622module_param(auto_create, int, 0444);
8623MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
8624
a613bffd
JK
8625module_param(led, int, 0444);
 8626MODULE_PARM_DESC(led,
 8627		 "enable led control on some systems (default 0 off)");
8628
43f66a6c
JK
8629module_param(debug, int, 0444);
8630MODULE_PARM_DESC(debug, "debug output mask");
8631
8632module_param(channel, int, 0444);
bf79451e 8633MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
43f66a6c 8634
ea2b26e0 8635#ifdef CONFIG_IPW_MONITOR
43f66a6c
JK
8636module_param(mode, int, 0444);
8637MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
8638#else
8639module_param(mode, int, 0444);
8640MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
8641#endif
8642
8643module_exit(ipw_exit);
8644module_init(ipw_init);