[PATCH] ipw2200: Add LEAP authentication algorithm support
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36 #define IPW2200_VERSION "git-1.0.8"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
40
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
42
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
47
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
52
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int hwcrypto = 1;
59 static const char ipw_modes[] = {
60 'a', 'b', 'g', '?'
61 };
62
63 #ifdef CONFIG_IPW_QOS
64 static int qos_enable = 0;
65 static int qos_burst_enable = 0;
66 static int qos_no_ack_mask = 0;
67 static int burst_duration_CCK = 0;
68 static int burst_duration_OFDM = 0;
69
70 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
71 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
72 QOS_TX3_CW_MIN_OFDM},
73 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
74 QOS_TX3_CW_MAX_OFDM},
75 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
76 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
77 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
78 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
79 };
80
81 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
82 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
83 QOS_TX3_CW_MIN_CCK},
84 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
85 QOS_TX3_CW_MAX_CCK},
86 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
87 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
88 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
89 QOS_TX3_TXOP_LIMIT_CCK}
90 };
91
92 static struct ieee80211_qos_parameters def_parameters_OFDM = {
93 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
94 DEF_TX3_CW_MIN_OFDM},
95 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
96 DEF_TX3_CW_MAX_OFDM},
97 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
98 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
99 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
100 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
101 };
102
103 static struct ieee80211_qos_parameters def_parameters_CCK = {
104 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
105 DEF_TX3_CW_MIN_CCK},
106 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
107 DEF_TX3_CW_MAX_CCK},
108 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
109 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
110 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
111 DEF_TX3_TXOP_LIMIT_CCK}
112 };
113
114 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
115
116 static int from_priority_to_tx_queue[] = {
117 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
118 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
119 };
120
121 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
122
123 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
124 *qos_param);
125 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
126 *qos_param);
127 #endif /* CONFIG_IPW_QOS */
128
129 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
130 static void ipw_remove_current_network(struct ipw_priv *priv);
131 static void ipw_rx(struct ipw_priv *priv);
132 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
133 struct clx2_tx_queue *txq, int qindex);
134 static int ipw_queue_reset(struct ipw_priv *priv);
135
136 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
137 int len, int sync);
138
139 static void ipw_tx_queue_free(struct ipw_priv *);
140
141 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
142 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
143 static void ipw_rx_queue_replenish(void *);
144 static int ipw_up(struct ipw_priv *);
145 static void ipw_bg_up(void *);
146 static void ipw_down(struct ipw_priv *);
147 static void ipw_bg_down(void *);
148 static int ipw_config(struct ipw_priv *);
149 static int init_supported_rates(struct ipw_priv *priv,
150 struct ipw_supported_rates *prates);
151 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
152 static void ipw_send_wep_keys(struct ipw_priv *, int);
153
154 static int ipw_is_valid_channel(struct ieee80211_device *, u8);
155 static int ipw_channel_to_index(struct ieee80211_device *, u8);
156 static u8 ipw_freq_to_channel(struct ieee80211_device *, u32);
157 static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *);
158 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *);
159
160 static int snprint_line(char *buf, size_t count,
161 const u8 * data, u32 len, u32 ofs)
162 {
163 int out, i, j, l;
164 char c;
165
166 out = snprintf(buf, count, "%08X", ofs);
167
168 for (l = 0, i = 0; i < 2; i++) {
169 out += snprintf(buf + out, count - out, " ");
170 for (j = 0; j < 8 && l < len; j++, l++)
171 out += snprintf(buf + out, count - out, "%02X ",
172 data[(i * 8 + j)]);
173 for (; j < 8; j++)
174 out += snprintf(buf + out, count - out, " ");
175 }
176
177 out += snprintf(buf + out, count - out, " ");
178 for (l = 0, i = 0; i < 2; i++) {
179 out += snprintf(buf + out, count - out, " ");
180 for (j = 0; j < 8 && l < len; j++, l++) {
181 c = data[(i * 8 + j)];
182 if (!isascii(c) || !isprint(c))
183 c = '.';
184
185 out += snprintf(buf + out, count - out, "%c", c);
186 }
187
188 for (; j < 8; j++)
189 out += snprintf(buf + out, count - out, " ");
190 }
191
192 return out;
193 }
194
195 static void printk_buf(int level, const u8 * data, u32 len)
196 {
197 char line[81];
198 u32 ofs = 0;
199 if (!(ipw_debug_level & level))
200 return;
201
202 while (len) {
203 snprint_line(line, sizeof(line), &data[ofs],
204 min(len, 16U), ofs);
205 printk(KERN_DEBUG "%s\n", line);
206 ofs += 16;
207 len -= min(len, 16U);
208 }
209 }
210
211 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
212 {
213 size_t out = size;
214 u32 ofs = 0;
215 int total = 0;
216
217 while (size && len) {
218 out = snprint_line(output, size, &data[ofs],
219 min_t(size_t, len, 16U), ofs);
220
221 ofs += 16;
222 output += out;
223 size -= out;
224 len -= min_t(size_t, len, 16U);
225 total += out;
226 }
227 return total;
228 }
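/*
 * Editor's note -- illustrative sketch only, not part of the driver. A
 * typical use of the hex-dump helpers above: printk_buf() emits one line
 * per 16 bytes of data (offset, hex bytes, then printable ASCII) whenever
 * the requested debug level bit is set in ipw_debug_level.
 */
#if 0
static void example_dump_buffer(const u8 *data, u32 len)
{
	/* Dump only when firmware-error debugging is enabled. */
	printk_buf(IPW_DL_FW_ERRORS, data, len);
}
#endif				/* 0 */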
229
230 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
231 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
232 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
233
234 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
235 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
236 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
237
238 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
239 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
240 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
241 {
242 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
243 __LINE__, (u32) (b), (u32) (c));
244 _ipw_write_reg8(a, b, c);
245 }
246
247 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
248 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
249 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
250 {
251 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
252 __LINE__, (u32) (b), (u32) (c));
253 _ipw_write_reg16(a, b, c);
254 }
255
256 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
257 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
258 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
259 {
260 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
261 __LINE__, (u32) (b), (u32) (c));
262 _ipw_write_reg32(a, b, c);
263 }
264
265 /* 8-bit direct write (low 4K) */
266 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
267
268 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
269 #define ipw_write8(ipw, ofs, val) do { \
270 	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
271 	_ipw_write8(ipw, ofs, val); } while (0)
272
273
274 /* 16-bit direct write (low 4K) */
275 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
276
277 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
278 #define ipw_write16(ipw, ofs, val) do { \
279 	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
280 	_ipw_write16(ipw, ofs, val); } while (0)
281
282
283 /* 32-bit direct write (low 4K) */
284 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
285
286 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
287 #define ipw_write32(ipw, ofs, val) do { \
288 	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
289 	_ipw_write32(ipw, ofs, val); } while (0)
290
291
292 /* 8-bit direct read (low 4K) */
293 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
294
295 /* 8-bit direct read (low 4K), with debug wrapper */
296 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
297 {
298 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
299 return _ipw_read8(ipw, ofs);
300 }
301
302 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
303 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
304
305
306 /* 16-bit direct read (low 4K) */
307 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
308
309 /* 16-bit direct read (low 4K), with debug wrapper */
310 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
311 {
312 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
313 return _ipw_read16(ipw, ofs);
314 }
315
316 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
317 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
318
319
320 /* 32-bit direct read (low 4K) */
321 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
322
323 /* 32-bit direct read (low 4K), with debug wrapper */
324 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
325 {
326 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
327 return _ipw_read32(ipw, ofs);
328 }
329
330 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
331 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
332
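/*
 * Editor's note -- illustrative sketch only, not part of the driver. The
 * accessors above split the register space in two: ipw_read32()/ipw_write32()
 * touch the first 4K of SRAM/registers directly through hw_base, while
 * ipw_read_reg32()/ipw_write_reg32() go through the IPW_INDIRECT_ADDR /
 * IPW_INDIRECT_DATA pair for everything above 4K.
 */
#if 0
static void example_register_access(struct ipw_priv *priv)
{
	u32 inta_mask, event;

	/* Direct read of a register in the low 4K window. */
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);

	/* Indirect read of a register above the 4K window. */
	event = ipw_read_reg32(priv, IPW_EVENT_REG);
}
#endif				/* 0 */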
333
334 /* multi-byte read (above 4K), with debug wrapper */
335 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
336 static inline void __ipw_read_indirect(const char *f, int l,
337 struct ipw_priv *a, u32 b, u8 * c, int d)
338 {
339 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
340 d);
341 _ipw_read_indirect(a, b, c, d);
342 }
343
344 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
345 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
346
347 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
348 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
349 int num);
350 #define ipw_write_indirect(a, b, c, d) do { \
351 	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
352 	_ipw_write_indirect(a, b, c, d); } while (0)
353
354 /* 32-bit indirect write (above 4K) */
355 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
356 {
357 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
358 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
359 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
360 }
361
362 /* 8-bit indirect write (above 4K) */
363 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
364 {
365 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
366 u32 dif_len = reg - aligned_addr;
367
368 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
369 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
370 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
371 }
372
373 /* 16-bit indirect write (above 4K) */
374 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
375 {
376 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
377 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
378
379 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
380 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
381 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
382 }
383
384
385 /* 8-bit indirect read (above 4K) */
386 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
387 {
388 u32 word;
389 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
390 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
391 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
392 return (word >> ((reg & 0x3) * 8)) & 0xff;
393 }
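/*
 * Editor's note -- worked example (illustrative address): reading byte
 * 0x00300005 with _ipw_read_reg8() writes the dword-aligned address
 * 0x00300004 to IPW_INDIRECT_ADDR, reads a full 32-bit word from
 * IPW_INDIRECT_DATA, then shifts right by (0x5 & 0x3) * 8 = 8 bits to pick
 * out the second byte of that word.
 */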
394
395 /* 32-bit indirect read (above 4K) */
396 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
397 {
398 u32 value;
399
400 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
401
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
403 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
404 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
405 return value;
406 }
407
408 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
409 /* for area above 1st 4K of SRAM/reg space */
410 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
411 int num)
412 {
413 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
414 u32 dif_len = addr - aligned_addr;
415 u32 i;
416
417 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
418
419 if (num <= 0) {
420 return;
421 }
422
423 /* Read the first dword (or portion) byte by byte */
424 if (unlikely(dif_len)) {
425 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
426 /* Start reading at aligned_addr + dif_len */
427 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
428 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
429 aligned_addr += 4;
430 }
431
432 /* Read all of the middle dwords as dwords, with auto-increment */
433 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
434 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
435 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
436
437 /* Read the last dword (or portion) byte by byte */
438 if (unlikely(num)) {
439 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
440 for (i = 0; num > 0; i++, num--)
441 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
442 }
443 }
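/*
 * Editor's note -- worked example (illustrative): an 11-byte read starting
 * at an address ending in ...6 has dif_len = 2, so the first pass reads the
 * last two bytes of that dword one at a time, the auto-increment loop then
 * reads two full dwords, and the final pass reads the one remaining
 * trailing byte the same byte-at-a-time way.
 */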
444
445 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
446 /* for area above 1st 4K of SRAM/reg space */
447 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
448 int num)
449 {
450 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
451 u32 dif_len = addr - aligned_addr;
452 u32 i;
453
454 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
455
456 if (num <= 0) {
457 return;
458 }
459
460 /* Write the first dword (or portion) byte by byte */
461 if (unlikely(dif_len)) {
462 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
463 /* Start writing at aligned_addr + dif_len */
464 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
465 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
466 aligned_addr += 4;
467 }
468
469 /* Write all of the middle dwords as dwords, with auto-increment */
470 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
471 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
472 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
473
474 /* Write the last dword (or portion) byte by byte */
475 if (unlikely(num)) {
476 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
477 for (i = 0; num > 0; i++, num--, buf++)
478 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
479 }
480 }
481
482 /* General purpose, no alignment requirement, multi-byte write */
483 /* for 1st 4K of SRAM/regs space (done as a single memcpy_toio) */
484 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
485 int num)
486 {
487 memcpy_toio((priv->hw_base + addr), buf, num);
488 }
489
490 /* Set bit(s) in low 4K of SRAM/regs */
491 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
492 {
493 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
494 }
495
496 /* Clear bit(s) in low 4K of SRAM/regs */
497 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
498 {
499 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
500 }
501
502 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
503 {
504 if (priv->status & STATUS_INT_ENABLED)
505 return;
506 priv->status |= STATUS_INT_ENABLED;
507 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
508 }
509
510 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
511 {
512 if (!(priv->status & STATUS_INT_ENABLED))
513 return;
514 priv->status &= ~STATUS_INT_ENABLED;
515 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
516 }
517
518 #ifdef CONFIG_IPW2200_DEBUG
519 static char *ipw_error_desc(u32 val)
520 {
521 switch (val) {
522 case IPW_FW_ERROR_OK:
523 return "ERROR_OK";
524 case IPW_FW_ERROR_FAIL:
525 return "ERROR_FAIL";
526 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
527 return "MEMORY_UNDERFLOW";
528 case IPW_FW_ERROR_MEMORY_OVERFLOW:
529 return "MEMORY_OVERFLOW";
530 case IPW_FW_ERROR_BAD_PARAM:
531 return "BAD_PARAM";
532 case IPW_FW_ERROR_BAD_CHECKSUM:
533 return "BAD_CHECKSUM";
534 case IPW_FW_ERROR_NMI_INTERRUPT:
535 return "NMI_INTERRUPT";
536 case IPW_FW_ERROR_BAD_DATABASE:
537 return "BAD_DATABASE";
538 case IPW_FW_ERROR_ALLOC_FAIL:
539 return "ALLOC_FAIL";
540 case IPW_FW_ERROR_DMA_UNDERRUN:
541 return "DMA_UNDERRUN";
542 case IPW_FW_ERROR_DMA_STATUS:
543 return "DMA_STATUS";
544 case IPW_FW_ERROR_DINO_ERROR:
545 return "DINO_ERROR";
546 case IPW_FW_ERROR_EEPROM_ERROR:
547 return "EEPROM_ERROR";
548 case IPW_FW_ERROR_SYSASSERT:
549 return "SYSASSERT";
550 case IPW_FW_ERROR_FATAL_ERROR:
551 return "FATAL_ERROR";
552 default:
553 return "UNKNOWN_ERROR";
554 }
555 }
556
557 static void ipw_dump_error_log(struct ipw_priv *priv,
558 struct ipw_fw_error *error)
559 {
560 u32 i;
561
562 if (!error) {
563 IPW_ERROR("Error allocating and capturing error log. "
564 "Nothing to dump.\n");
565 return;
566 }
567
568 IPW_ERROR("Start IPW Error Log Dump:\n");
569 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
570 error->status, error->config);
571
572 for (i = 0; i < error->elem_len; i++)
573 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
574 ipw_error_desc(error->elem[i].desc),
575 error->elem[i].time,
576 error->elem[i].blink1,
577 error->elem[i].blink2,
578 error->elem[i].link1,
579 error->elem[i].link2, error->elem[i].data);
580 for (i = 0; i < error->log_len; i++)
581 IPW_ERROR("%i\t0x%08x\t%i\n",
582 error->log[i].time,
583 error->log[i].data, error->log[i].event);
584 }
585 #endif
586
587 static inline int ipw_is_init(struct ipw_priv *priv)
588 {
589 return (priv->status & STATUS_INIT) ? 1 : 0;
590 }
591
592 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
593 {
594 u32 addr, field_info, field_len, field_count, total_len;
595
596 IPW_DEBUG_ORD("ordinal = %i\n", ord);
597
598 if (!priv || !val || !len) {
599 IPW_DEBUG_ORD("Invalid argument\n");
600 return -EINVAL;
601 }
602
603 /* verify device ordinal tables have been initialized */
604 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
605 IPW_DEBUG_ORD("Access ordinals before initialization\n");
606 return -EINVAL;
607 }
608
609 switch (IPW_ORD_TABLE_ID_MASK & ord) {
610 case IPW_ORD_TABLE_0_MASK:
611 /*
612 * TABLE 0: Direct access to a table of 32 bit values
613 *
614 * This is a very simple table with the data directly
615 * read from the table
616 */
617
618 /* remove the table id from the ordinal */
619 ord &= IPW_ORD_TABLE_VALUE_MASK;
620
621 /* boundary check */
622 if (ord > priv->table0_len) {
623 IPW_DEBUG_ORD("ordinal value (%i) longer then "
624 "max (%i)\n", ord, priv->table0_len);
625 return -EINVAL;
626 }
627
628 /* verify we have enough room to store the value */
629 if (*len < sizeof(u32)) {
630 IPW_DEBUG_ORD("ordinal buffer length too small, "
631 "need %zd\n", sizeof(u32));
632 return -EINVAL;
633 }
634
635 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
636 ord, priv->table0_addr + (ord << 2));
637
638 *len = sizeof(u32);
639 ord <<= 2;
640 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
641 break;
642
643 case IPW_ORD_TABLE_1_MASK:
644 /*
645 * TABLE 1: Indirect access to a table of 32 bit values
646 *
647 * This is a fairly large table of u32 values each
648 * representing starting addr for the data (which is
649 * also a u32)
650 */
651
652 /* remove the table id from the ordinal */
653 ord &= IPW_ORD_TABLE_VALUE_MASK;
654
655 /* boundary check */
656 if (ord > priv->table1_len) {
657 IPW_DEBUG_ORD("ordinal value too long\n");
658 return -EINVAL;
659 }
660
661 /* verify we have enough room to store the value */
662 if (*len < sizeof(u32)) {
663 IPW_DEBUG_ORD("ordinal buffer length too small, "
664 "need %zd\n", sizeof(u32));
665 return -EINVAL;
666 }
667
668 *((u32 *) val) =
669 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
670 *len = sizeof(u32);
671 break;
672
673 case IPW_ORD_TABLE_2_MASK:
674 /*
675 * TABLE 2: Indirect access to a table of variable sized values
676 *
677 * This table consists of six values, each containing
678 * - dword containing the starting offset of the data
679 * - dword containing the length in the first 16 bits
680 * and the count in the second 16 bits
681 */
682
683 /* remove the table id from the ordinal */
684 ord &= IPW_ORD_TABLE_VALUE_MASK;
685
686 /* boundary check */
687 if (ord > priv->table2_len) {
688 IPW_DEBUG_ORD("ordinal value too long\n");
689 return -EINVAL;
690 }
691
692 /* get the address of statistic */
693 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
694
695 /* get the second DW of statistics ;
696 * two 16-bit words - first is length, second is count */
697 field_info =
698 ipw_read_reg32(priv,
699 priv->table2_addr + (ord << 3) +
700 sizeof(u32));
701
702 /* get each entry length */
703 field_len = *((u16 *) & field_info);
704
705 /* get number of entries */
706 field_count = *(((u16 *) & field_info) + 1);
707
708 /* abort if there is not enough memory */
709 total_len = field_len * field_count;
710 if (total_len > *len) {
711 *len = total_len;
712 return -EINVAL;
713 }
714
715 *len = total_len;
716 if (!total_len)
717 return 0;
718
719 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
720 "field_info = 0x%08x\n",
721 addr, total_len, field_info);
722 ipw_read_indirect(priv, addr, val, total_len);
723 break;
724
725 default:
726 IPW_DEBUG_ORD("Invalid ordinal!\n");
727 return -EINVAL;
728
729 }
730
731 return 0;
732 }
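/*
 * Editor's note -- illustrative sketch only, modeled on show_ucode_version()
 * later in this file: a typical TABLE 0 lookup passes a u32 buffer and its
 * size; on success the buffer holds the value and *len is set to sizeof(u32).
 */
#if 0
static int example_read_ucode_version(struct ipw_priv *priv, u32 *version)
{
	u32 len = sizeof(u32);

	return ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, version, &len);
}
#endif				/* 0 */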
733
734 static void ipw_init_ordinals(struct ipw_priv *priv)
735 {
736 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
737 priv->table0_len = ipw_read32(priv, priv->table0_addr);
738
739 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
740 priv->table0_addr, priv->table0_len);
741
742 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
743 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
744
745 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
746 priv->table1_addr, priv->table1_len);
747
748 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
749 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
750 priv->table2_len &= 0x0000ffff; /* use lower 16 bits */
751
752 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
753 priv->table2_addr, priv->table2_len);
754
755 }
756
757 static u32 ipw_register_toggle(u32 reg)
758 {
759 reg &= ~IPW_START_STANDBY;
760 if (reg & IPW_GATE_ODMA)
761 reg &= ~IPW_GATE_ODMA;
762 if (reg & IPW_GATE_IDMA)
763 reg &= ~IPW_GATE_IDMA;
764 if (reg & IPW_GATE_ADMA)
765 reg &= ~IPW_GATE_ADMA;
766 return reg;
767 }
768
769 /*
770 * LED behavior:
771 * - On radio ON, turn on any LEDs that need to be on during start
772 * - On initialization, start unassociated blink
773 * - On association, disable unassociated blink
774 * - On disassociation, start unassociated blink
775 * - On radio OFF, turn off any LEDs started during radio on
776 *
777 */
778 #define LD_TIME_LINK_ON 300
779 #define LD_TIME_LINK_OFF 2700
780 #define LD_TIME_ACT_ON 250
781
782 static void ipw_led_link_on(struct ipw_priv *priv)
783 {
784 unsigned long flags;
785 u32 led;
786
787 /* If configured to not use LEDs, or nic_type is 1,
788 * then we don't toggle a LINK led */
789 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
790 return;
791
792 spin_lock_irqsave(&priv->lock, flags);
793
794 if (!(priv->status & STATUS_RF_KILL_MASK) &&
795 !(priv->status & STATUS_LED_LINK_ON)) {
796 IPW_DEBUG_LED("Link LED On\n");
797 led = ipw_read_reg32(priv, IPW_EVENT_REG);
798 led |= priv->led_association_on;
799
800 led = ipw_register_toggle(led);
801
802 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
803 ipw_write_reg32(priv, IPW_EVENT_REG, led);
804
805 priv->status |= STATUS_LED_LINK_ON;
806
807 /* If we aren't associated, schedule turning the LED off */
808 if (!(priv->status & STATUS_ASSOCIATED))
809 queue_delayed_work(priv->workqueue,
810 &priv->led_link_off,
811 LD_TIME_LINK_ON);
812 }
813
814 spin_unlock_irqrestore(&priv->lock, flags);
815 }
816
817 static void ipw_bg_led_link_on(void *data)
818 {
819 struct ipw_priv *priv = data;
820 down(&priv->sem);
821 ipw_led_link_on(data);
822 up(&priv->sem);
823 }
824
825 static void ipw_led_link_off(struct ipw_priv *priv)
826 {
827 unsigned long flags;
828 u32 led;
829
830 /* If configured not to use LEDs, or nic type is 1,
831 * then we don't toggle the LINK led. */
832 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
833 return;
834
835 spin_lock_irqsave(&priv->lock, flags);
836
837 if (priv->status & STATUS_LED_LINK_ON) {
838 led = ipw_read_reg32(priv, IPW_EVENT_REG);
839 led &= priv->led_association_off;
840 led = ipw_register_toggle(led);
841
842 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
843 ipw_write_reg32(priv, IPW_EVENT_REG, led);
844
845 IPW_DEBUG_LED("Link LED Off\n");
846
847 priv->status &= ~STATUS_LED_LINK_ON;
848
849 /* If we aren't associated and the radio is on, schedule
850 * turning the LED on (blink while unassociated) */
851 if (!(priv->status & STATUS_RF_KILL_MASK) &&
852 !(priv->status & STATUS_ASSOCIATED))
853 queue_delayed_work(priv->workqueue, &priv->led_link_on,
854 LD_TIME_LINK_OFF);
855
856 }
857
858 spin_unlock_irqrestore(&priv->lock, flags);
859 }
860
861 static void ipw_bg_led_link_off(void *data)
862 {
863 struct ipw_priv *priv = data;
864 down(&priv->sem);
865 ipw_led_link_off(data);
866 up(&priv->sem);
867 }
868
869 static void __ipw_led_activity_on(struct ipw_priv *priv)
870 {
871 u32 led;
872
873 if (priv->config & CFG_NO_LED)
874 return;
875
876 if (priv->status & STATUS_RF_KILL_MASK)
877 return;
878
879 if (!(priv->status & STATUS_LED_ACT_ON)) {
880 led = ipw_read_reg32(priv, IPW_EVENT_REG);
881 led |= priv->led_activity_on;
882
883 led = ipw_register_toggle(led);
884
885 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
886 ipw_write_reg32(priv, IPW_EVENT_REG, led);
887
888 IPW_DEBUG_LED("Activity LED On\n");
889
890 priv->status |= STATUS_LED_ACT_ON;
891
892 cancel_delayed_work(&priv->led_act_off);
893 queue_delayed_work(priv->workqueue, &priv->led_act_off,
894 LD_TIME_ACT_ON);
895 } else {
896 /* Reschedule LED off for full time period */
897 cancel_delayed_work(&priv->led_act_off);
898 queue_delayed_work(priv->workqueue, &priv->led_act_off,
899 LD_TIME_ACT_ON);
900 }
901 }
902
903 #if 0
904 void ipw_led_activity_on(struct ipw_priv *priv)
905 {
906 unsigned long flags;
907 spin_lock_irqsave(&priv->lock, flags);
908 __ipw_led_activity_on(priv);
909 spin_unlock_irqrestore(&priv->lock, flags);
910 }
911 #endif /* 0 */
912
913 static void ipw_led_activity_off(struct ipw_priv *priv)
914 {
915 unsigned long flags;
916 u32 led;
917
918 if (priv->config & CFG_NO_LED)
919 return;
920
921 spin_lock_irqsave(&priv->lock, flags);
922
923 if (priv->status & STATUS_LED_ACT_ON) {
924 led = ipw_read_reg32(priv, IPW_EVENT_REG);
925 led &= priv->led_activity_off;
926
927 led = ipw_register_toggle(led);
928
929 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
930 ipw_write_reg32(priv, IPW_EVENT_REG, led);
931
932 IPW_DEBUG_LED("Activity LED Off\n");
933
934 priv->status &= ~STATUS_LED_ACT_ON;
935 }
936
937 spin_unlock_irqrestore(&priv->lock, flags);
938 }
939
940 static void ipw_bg_led_activity_off(void *data)
941 {
942 struct ipw_priv *priv = data;
943 down(&priv->sem);
944 ipw_led_activity_off(data);
945 up(&priv->sem);
946 }
947
948 static void ipw_led_band_on(struct ipw_priv *priv)
949 {
950 unsigned long flags;
951 u32 led;
952
953 /* Only nic type 1 supports mode LEDs */
954 if (priv->config & CFG_NO_LED ||
955 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
956 return;
957
958 spin_lock_irqsave(&priv->lock, flags);
959
960 led = ipw_read_reg32(priv, IPW_EVENT_REG);
961 if (priv->assoc_network->mode == IEEE_A) {
962 led |= priv->led_ofdm_on;
963 led &= priv->led_association_off;
964 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
965 } else if (priv->assoc_network->mode == IEEE_G) {
966 led |= priv->led_ofdm_on;
967 led |= priv->led_association_on;
968 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
969 } else {
970 led &= priv->led_ofdm_off;
971 led |= priv->led_association_on;
972 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
973 }
974
975 led = ipw_register_toggle(led);
976
977 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
978 ipw_write_reg32(priv, IPW_EVENT_REG, led);
979
980 spin_unlock_irqrestore(&priv->lock, flags);
981 }
982
983 static void ipw_led_band_off(struct ipw_priv *priv)
984 {
985 unsigned long flags;
986 u32 led;
987
988 /* Only nic type 1 supports mode LEDs */
989 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
990 return;
991
992 spin_lock_irqsave(&priv->lock, flags);
993
994 led = ipw_read_reg32(priv, IPW_EVENT_REG);
995 led &= priv->led_ofdm_off;
996 led &= priv->led_association_off;
997
998 led = ipw_register_toggle(led);
999
1000 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1001 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1002
1003 spin_unlock_irqrestore(&priv->lock, flags);
1004 }
1005
1006 static void ipw_led_radio_on(struct ipw_priv *priv)
1007 {
1008 ipw_led_link_on(priv);
1009 }
1010
1011 static void ipw_led_radio_off(struct ipw_priv *priv)
1012 {
1013 ipw_led_activity_off(priv);
1014 ipw_led_link_off(priv);
1015 }
1016
1017 static void ipw_led_link_up(struct ipw_priv *priv)
1018 {
1019 /* Set the Link Led on for all nic types */
1020 ipw_led_link_on(priv);
1021 }
1022
1023 static void ipw_led_link_down(struct ipw_priv *priv)
1024 {
1025 ipw_led_activity_off(priv);
1026 ipw_led_link_off(priv);
1027
1028 if (priv->status & STATUS_RF_KILL_MASK)
1029 ipw_led_radio_off(priv);
1030 }
1031
1032 static void ipw_led_init(struct ipw_priv *priv)
1033 {
1034 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1035
1036 /* Set the default PINs for the link and activity leds */
1037 priv->led_activity_on = IPW_ACTIVITY_LED;
1038 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1039
1040 priv->led_association_on = IPW_ASSOCIATED_LED;
1041 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1042
1043 /* Set the default PINs for the OFDM leds */
1044 priv->led_ofdm_on = IPW_OFDM_LED;
1045 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1046
1047 switch (priv->nic_type) {
1048 case EEPROM_NIC_TYPE_1:
1049 /* In this NIC type, the LEDs are reversed.... */
1050 priv->led_activity_on = IPW_ASSOCIATED_LED;
1051 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1052 priv->led_association_on = IPW_ACTIVITY_LED;
1053 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1054
1055 if (!(priv->config & CFG_NO_LED))
1056 ipw_led_band_on(priv);
1057
1058 /* And we don't blink link LEDs for this nic, so
1059 * just return here */
1060 return;
1061
1062 case EEPROM_NIC_TYPE_3:
1063 case EEPROM_NIC_TYPE_2:
1064 case EEPROM_NIC_TYPE_4:
1065 case EEPROM_NIC_TYPE_0:
1066 break;
1067
1068 default:
1069 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1070 priv->nic_type);
1071 priv->nic_type = EEPROM_NIC_TYPE_0;
1072 break;
1073 }
1074
1075 if (!(priv->config & CFG_NO_LED)) {
1076 if (priv->status & STATUS_ASSOCIATED)
1077 ipw_led_link_on(priv);
1078 else
1079 ipw_led_link_off(priv);
1080 }
1081 }
1082
1083 static void ipw_led_shutdown(struct ipw_priv *priv)
1084 {
1085 ipw_led_activity_off(priv);
1086 ipw_led_link_off(priv);
1087 ipw_led_band_off(priv);
1088 cancel_delayed_work(&priv->led_link_on);
1089 cancel_delayed_work(&priv->led_link_off);
1090 cancel_delayed_work(&priv->led_act_off);
1091 }
1092
1093 /*
1094 * The following adds a new attribute to the sysfs representation
1095 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1096 * used for controlling the debug level.
1097 *
1098 * See the level definitions in ipw for details.
1099 */
1100 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1101 {
1102 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1103 }
1104
1105 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1106 size_t count)
1107 {
1108 char *p = (char *)buf;
1109 u32 val;
1110
1111 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1112 p++;
1113 if (p[0] == 'x' || p[0] == 'X')
1114 p++;
1115 val = simple_strtoul(p, &p, 16);
1116 } else
1117 val = simple_strtoul(p, &p, 10);
1118 if (p == buf)
1119 printk(KERN_INFO DRV_NAME
1120 ": %s is not in hex or decimal form.\n", buf);
1121 else
1122 ipw_debug_level = val;
1123
1124 return strnlen(buf, count);
1125 }
1126
1127 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1128 show_debug_level, store_debug_level);
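/*
 * Editor's note -- usage sketch (illustrative): store_debug_level() accepts
 * either a hex value (written with a leading "0x" or "x", e.g. "0x8") or a
 * plain decimal number, so writing such a value to the debug_level
 * attribute updates ipw_debug_level at runtime.
 */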
1129
1130 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1131 {
1132 /* length = 1st dword in log */
1133 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1134 }
1135
1136 static void ipw_capture_event_log(struct ipw_priv *priv,
1137 u32 log_len, struct ipw_event *log)
1138 {
1139 u32 base;
1140
1141 if (log_len) {
1142 base = ipw_read32(priv, IPW_EVENT_LOG);
1143 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1144 (u8 *) log, sizeof(*log) * log_len);
1145 }
1146 }
1147
1148 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1149 {
1150 struct ipw_fw_error *error;
1151 u32 log_len = ipw_get_event_log_len(priv);
1152 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1153 u32 elem_len = ipw_read_reg32(priv, base);
1154
1155 error = kmalloc(sizeof(*error) +
1156 sizeof(*error->elem) * elem_len +
1157 sizeof(*error->log) * log_len, GFP_ATOMIC);
1158 if (!error) {
1159 IPW_ERROR("Memory allocation for firmware error log "
1160 "failed.\n");
1161 return NULL;
1162 }
1163 error->jiffies = jiffies;
1164 error->status = priv->status;
1165 error->config = priv->config;
1166 error->elem_len = elem_len;
1167 error->log_len = log_len;
1168 error->elem = (struct ipw_error_elem *)error->payload;
1169 error->log = (struct ipw_event *)(error->elem + elem_len);
1170
1171 ipw_capture_event_log(priv, log_len, error->log);
1172
1173 if (elem_len)
1174 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1175 sizeof(*error->elem) * elem_len);
1176
1177 return error;
1178 }
1179
1180 static void ipw_free_error_log(struct ipw_fw_error *error)
1181 {
1182 if (error)
1183 kfree(error);
1184 }
1185
1186 static ssize_t show_event_log(struct device *d,
1187 struct device_attribute *attr, char *buf)
1188 {
1189 struct ipw_priv *priv = dev_get_drvdata(d);
1190 u32 log_len = ipw_get_event_log_len(priv);
1191 struct ipw_event log[log_len];
1192 u32 len = 0, i;
1193
1194 ipw_capture_event_log(priv, log_len, log);
1195
1196 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1197 for (i = 0; i < log_len; i++)
1198 len += snprintf(buf + len, PAGE_SIZE - len,
1199 "\n%08X%08X%08X",
1200 log[i].time, log[i].event, log[i].data);
1201 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1202 return len;
1203 }
1204
1205 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1206
1207 static ssize_t show_error(struct device *d,
1208 struct device_attribute *attr, char *buf)
1209 {
1210 struct ipw_priv *priv = dev_get_drvdata(d);
1211 u32 len = 0, i;
1212 if (!priv->error)
1213 return 0;
1214 len += snprintf(buf + len, PAGE_SIZE - len,
1215 "%08lX%08X%08X%08X",
1216 priv->error->jiffies,
1217 priv->error->status,
1218 priv->error->config, priv->error->elem_len);
1219 for (i = 0; i < priv->error->elem_len; i++)
1220 len += snprintf(buf + len, PAGE_SIZE - len,
1221 "\n%08X%08X%08X%08X%08X%08X%08X",
1222 priv->error->elem[i].time,
1223 priv->error->elem[i].desc,
1224 priv->error->elem[i].blink1,
1225 priv->error->elem[i].blink2,
1226 priv->error->elem[i].link1,
1227 priv->error->elem[i].link2,
1228 priv->error->elem[i].data);
1229
1230 len += snprintf(buf + len, PAGE_SIZE - len,
1231 "\n%08X", priv->error->log_len);
1232 for (i = 0; i < priv->error->log_len; i++)
1233 len += snprintf(buf + len, PAGE_SIZE - len,
1234 "\n%08X%08X%08X",
1235 priv->error->log[i].time,
1236 priv->error->log[i].event,
1237 priv->error->log[i].data);
1238 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1239 return len;
1240 }
1241
1242 static ssize_t clear_error(struct device *d,
1243 struct device_attribute *attr,
1244 const char *buf, size_t count)
1245 {
1246 struct ipw_priv *priv = dev_get_drvdata(d);
1247 if (priv->error) {
1248 ipw_free_error_log(priv->error);
1249 priv->error = NULL;
1250 }
1251 return count;
1252 }
1253
1254 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1255
1256 static ssize_t show_cmd_log(struct device *d,
1257 struct device_attribute *attr, char *buf)
1258 {
1259 struct ipw_priv *priv = dev_get_drvdata(d);
1260 u32 len = 0, i;
1261 if (!priv->cmdlog)
1262 return 0;
1263 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1264 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1265 i = (i + 1) % priv->cmdlog_len) {
1266 len +=
1267 snprintf(buf + len, PAGE_SIZE - len,
1268 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1269 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1270 priv->cmdlog[i].cmd.len);
1271 len +=
1272 snprintk_buf(buf + len, PAGE_SIZE - len,
1273 (u8 *) priv->cmdlog[i].cmd.param,
1274 priv->cmdlog[i].cmd.len);
1275 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1276 }
1277 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1278 return len;
1279 }
1280
1281 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1282
1283 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1284 char *buf)
1285 {
1286 struct ipw_priv *priv = dev_get_drvdata(d);
1287 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1288 }
1289
1290 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1291 const char *buf, size_t count)
1292 {
1293 struct ipw_priv *priv = dev_get_drvdata(d);
1294 #ifdef CONFIG_IPW2200_DEBUG
1295 struct net_device *dev = priv->net_dev;
1296 #endif
1297 char buffer[] = "00000000";
1298 unsigned long len =
1299 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1300 unsigned long val;
1301 char *p = buffer;
1302
1303 IPW_DEBUG_INFO("enter\n");
1304
1305 strncpy(buffer, buf, len);
1306 buffer[len] = 0;
1307
1308 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1309 p++;
1310 if (p[0] == 'x' || p[0] == 'X')
1311 p++;
1312 val = simple_strtoul(p, &p, 16);
1313 } else
1314 val = simple_strtoul(p, &p, 10);
1315 if (p == buffer) {
1316 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1317 } else {
1318 priv->ieee->scan_age = val;
1319 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1320 }
1321
1322 IPW_DEBUG_INFO("exit\n");
1323 return len;
1324 }
1325
1326 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1327
1328 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1329 char *buf)
1330 {
1331 struct ipw_priv *priv = dev_get_drvdata(d);
1332 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1333 }
1334
1335 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1336 const char *buf, size_t count)
1337 {
1338 struct ipw_priv *priv = dev_get_drvdata(d);
1339
1340 IPW_DEBUG_INFO("enter\n");
1341
1342 if (count == 0)
1343 return 0;
1344
1345 if (*buf == 0) {
1346 IPW_DEBUG_LED("Disabling LED control.\n");
1347 priv->config |= CFG_NO_LED;
1348 ipw_led_shutdown(priv);
1349 } else {
1350 IPW_DEBUG_LED("Enabling LED control.\n");
1351 priv->config &= ~CFG_NO_LED;
1352 ipw_led_init(priv);
1353 }
1354
1355 IPW_DEBUG_INFO("exit\n");
1356 return count;
1357 }
1358
1359 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1360
1361 static ssize_t show_status(struct device *d,
1362 struct device_attribute *attr, char *buf)
1363 {
1364 struct ipw_priv *p = d->driver_data;
1365 return sprintf(buf, "0x%08x\n", (int)p->status);
1366 }
1367
1368 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1369
1370 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1371 char *buf)
1372 {
1373 struct ipw_priv *p = d->driver_data;
1374 return sprintf(buf, "0x%08x\n", (int)p->config);
1375 }
1376
1377 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1378
1379 static ssize_t show_nic_type(struct device *d,
1380 struct device_attribute *attr, char *buf)
1381 {
1382 struct ipw_priv *priv = d->driver_data;
1383 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1384 }
1385
1386 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1387
1388 static ssize_t show_ucode_version(struct device *d,
1389 struct device_attribute *attr, char *buf)
1390 {
1391 u32 len = sizeof(u32), tmp = 0;
1392 struct ipw_priv *p = d->driver_data;
1393
1394 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1395 return 0;
1396
1397 return sprintf(buf, "0x%08x\n", tmp);
1398 }
1399
1400 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1401
1402 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1403 char *buf)
1404 {
1405 u32 len = sizeof(u32), tmp = 0;
1406 struct ipw_priv *p = d->driver_data;
1407
1408 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1409 return 0;
1410
1411 return sprintf(buf, "0x%08x\n", tmp);
1412 }
1413
1414 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1415
1416 /*
1417 * Add a device attribute to view/control the delay between eeprom
1418 * operations.
1419 */
1420 static ssize_t show_eeprom_delay(struct device *d,
1421 struct device_attribute *attr, char *buf)
1422 {
1423 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1424 return sprintf(buf, "%i\n", n);
1425 }
1426 static ssize_t store_eeprom_delay(struct device *d,
1427 struct device_attribute *attr,
1428 const char *buf, size_t count)
1429 {
1430 struct ipw_priv *p = d->driver_data;
1431 sscanf(buf, "%i", &p->eeprom_delay);
1432 return strnlen(buf, count);
1433 }
1434
1435 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1436 show_eeprom_delay, store_eeprom_delay);
1437
1438 static ssize_t show_command_event_reg(struct device *d,
1439 struct device_attribute *attr, char *buf)
1440 {
1441 u32 reg = 0;
1442 struct ipw_priv *p = d->driver_data;
1443
1444 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1445 return sprintf(buf, "0x%08x\n", reg);
1446 }
1447 static ssize_t store_command_event_reg(struct device *d,
1448 struct device_attribute *attr,
1449 const char *buf, size_t count)
1450 {
1451 u32 reg;
1452 struct ipw_priv *p = d->driver_data;
1453
1454 sscanf(buf, "%x", &reg);
1455 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1456 return strnlen(buf, count);
1457 }
1458
1459 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1460 show_command_event_reg, store_command_event_reg);
1461
1462 static ssize_t show_mem_gpio_reg(struct device *d,
1463 struct device_attribute *attr, char *buf)
1464 {
1465 u32 reg = 0;
1466 struct ipw_priv *p = d->driver_data;
1467
1468 reg = ipw_read_reg32(p, 0x301100);
1469 return sprintf(buf, "0x%08x\n", reg);
1470 }
1471 static ssize_t store_mem_gpio_reg(struct device *d,
1472 struct device_attribute *attr,
1473 const char *buf, size_t count)
1474 {
1475 u32 reg;
1476 struct ipw_priv *p = d->driver_data;
1477
1478 sscanf(buf, "%x", &reg);
1479 ipw_write_reg32(p, 0x301100, reg);
1480 return strnlen(buf, count);
1481 }
1482
1483 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1484 show_mem_gpio_reg, store_mem_gpio_reg);
1485
1486 static ssize_t show_indirect_dword(struct device *d,
1487 struct device_attribute *attr, char *buf)
1488 {
1489 u32 reg = 0;
1490 struct ipw_priv *priv = d->driver_data;
1491
1492 if (priv->status & STATUS_INDIRECT_DWORD)
1493 reg = ipw_read_reg32(priv, priv->indirect_dword);
1494 else
1495 reg = 0;
1496
1497 return sprintf(buf, "0x%08x\n", reg);
1498 }
1499 static ssize_t store_indirect_dword(struct device *d,
1500 struct device_attribute *attr,
1501 const char *buf, size_t count)
1502 {
1503 struct ipw_priv *priv = d->driver_data;
1504
1505 sscanf(buf, "%x", &priv->indirect_dword);
1506 priv->status |= STATUS_INDIRECT_DWORD;
1507 return strnlen(buf, count);
1508 }
1509
1510 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1511 show_indirect_dword, store_indirect_dword);
1512
1513 static ssize_t show_indirect_byte(struct device *d,
1514 struct device_attribute *attr, char *buf)
1515 {
1516 u8 reg = 0;
1517 struct ipw_priv *priv = d->driver_data;
1518
1519 if (priv->status & STATUS_INDIRECT_BYTE)
1520 reg = ipw_read_reg8(priv, priv->indirect_byte);
1521 else
1522 reg = 0;
1523
1524 return sprintf(buf, "0x%02x\n", reg);
1525 }
1526 static ssize_t store_indirect_byte(struct device *d,
1527 struct device_attribute *attr,
1528 const char *buf, size_t count)
1529 {
1530 struct ipw_priv *priv = d->driver_data;
1531
1532 sscanf(buf, "%x", &priv->indirect_byte);
1533 priv->status |= STATUS_INDIRECT_BYTE;
1534 return strnlen(buf, count);
1535 }
1536
1537 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1538 show_indirect_byte, store_indirect_byte);
1539
1540 static ssize_t show_direct_dword(struct device *d,
1541 struct device_attribute *attr, char *buf)
1542 {
1543 u32 reg = 0;
1544 struct ipw_priv *priv = d->driver_data;
1545
1546 if (priv->status & STATUS_DIRECT_DWORD)
1547 reg = ipw_read32(priv, priv->direct_dword);
1548 else
1549 reg = 0;
1550
1551 return sprintf(buf, "0x%08x\n", reg);
1552 }
1553 static ssize_t store_direct_dword(struct device *d,
1554 struct device_attribute *attr,
1555 const char *buf, size_t count)
1556 {
1557 struct ipw_priv *priv = d->driver_data;
1558
1559 sscanf(buf, "%x", &priv->direct_dword);
1560 priv->status |= STATUS_DIRECT_DWORD;
1561 return strnlen(buf, count);
1562 }
1563
1564 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1565 show_direct_dword, store_direct_dword);
1566
1567 static int rf_kill_active(struct ipw_priv *priv)
1568 {
1569 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1570 priv->status |= STATUS_RF_KILL_HW;
1571 else
1572 priv->status &= ~STATUS_RF_KILL_HW;
1573
1574 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1575 }
1576
1577 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1578 char *buf)
1579 {
1580 /* 0 - RF kill not enabled
1581 1 - SW based RF kill active (sysfs)
1582 2 - HW based RF kill active
1583 3 - Both HW and SW based RF kill active */
1584 struct ipw_priv *priv = d->driver_data;
1585 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1586 (rf_kill_active(priv) ? 0x2 : 0x0);
1587 return sprintf(buf, "%i\n", val);
1588 }
1589
1590 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1591 {
1592 if ((disable_radio ? 1 : 0) ==
1593 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1594 return 0;
1595
1596 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1597 disable_radio ? "OFF" : "ON");
1598
1599 if (disable_radio) {
1600 priv->status |= STATUS_RF_KILL_SW;
1601
1602 if (priv->workqueue)
1603 cancel_delayed_work(&priv->request_scan);
1604 queue_work(priv->workqueue, &priv->down);
1605 } else {
1606 priv->status &= ~STATUS_RF_KILL_SW;
1607 if (rf_kill_active(priv)) {
1608 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1609 "disabled by HW switch\n");
1610 /* Make sure the RF_KILL check timer is running */
1611 cancel_delayed_work(&priv->rf_kill);
1612 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1613 2 * HZ);
1614 } else
1615 queue_work(priv->workqueue, &priv->up);
1616 }
1617
1618 return 1;
1619 }
1620
1621 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1622 const char *buf, size_t count)
1623 {
1624 struct ipw_priv *priv = d->driver_data;
1625
1626 ipw_radio_kill_sw(priv, buf[0] == '1');
1627
1628 return count;
1629 }
1630
1631 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1632
1633 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1634 char *buf)
1635 {
1636 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1637 int pos = 0, len = 0;
1638 if (priv->config & CFG_SPEED_SCAN) {
1639 while (priv->speed_scan[pos] != 0)
1640 len += sprintf(&buf[len], "%d ",
1641 priv->speed_scan[pos++]);
1642 return len + sprintf(&buf[len], "\n");
1643 }
1644
1645 return sprintf(buf, "0\n");
1646 }
1647
1648 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1649 const char *buf, size_t count)
1650 {
1651 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1652 int channel, pos = 0;
1653 const char *p = buf;
1654
1655 /* list of space-separated channels to scan, optionally ending with 0 */
1656 while ((channel = simple_strtol(p, NULL, 0))) {
1657 if (pos == MAX_SPEED_SCAN - 1) {
1658 priv->speed_scan[pos] = 0;
1659 break;
1660 }
1661
1662 if (ipw_is_valid_channel(priv->ieee, channel))
1663 priv->speed_scan[pos++] = channel;
1664 else
1665 IPW_WARNING("Skipping invalid channel request: %d\n",
1666 channel);
1667 p = strchr(p, ' ');
1668 if (!p)
1669 break;
1670 while (*p == ' ' || *p == '\t')
1671 p++;
1672 }
1673
1674 if (pos == 0)
1675 priv->config &= ~CFG_SPEED_SCAN;
1676 else {
1677 priv->speed_scan_pos = 0;
1678 priv->config |= CFG_SPEED_SCAN;
1679 }
1680
1681 return count;
1682 }
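/*
 * Editor's note -- usage sketch (illustrative): writing a space-separated
 * channel list such as "1 6 11" to the speed_scan attribute enables
 * CFG_SPEED_SCAN over just those channels; writing "0" (or a list with no
 * valid channels) clears the setting and disables speed scan again.
 */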
1683
1684 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1685 store_speed_scan);
1686
1687 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1688 char *buf)
1689 {
1690 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1691 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1692 }
1693
1694 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1695 const char *buf, size_t count)
1696 {
1697 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1698 if (buf[0] == '1')
1699 priv->config |= CFG_NET_STATS;
1700 else
1701 priv->config &= ~CFG_NET_STATS;
1702
1703 return count;
1704 }
1705
1706 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1707 show_net_stats, store_net_stats);
1708
1709 static void notify_wx_assoc_event(struct ipw_priv *priv)
1710 {
1711 union iwreq_data wrqu;
1712 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1713 if (priv->status & STATUS_ASSOCIATED)
1714 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1715 else
1716 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1717 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1718 }
1719
1720 static void ipw_irq_tasklet(struct ipw_priv *priv)
1721 {
1722 u32 inta, inta_mask, handled = 0;
1723 unsigned long flags;
1724 int rc = 0;
1725
1726 spin_lock_irqsave(&priv->lock, flags);
1727
1728 inta = ipw_read32(priv, IPW_INTA_RW);
1729 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1730 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1731
1732 /* Add any cached INTA values that need to be handled */
1733 inta |= priv->isr_inta;
1734
1735 /* handle each of the reasons for the interrupt */
1736 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1737 ipw_rx(priv);
1738 handled |= IPW_INTA_BIT_RX_TRANSFER;
1739 }
1740
1741 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1742 IPW_DEBUG_HC("Command completed.\n");
1743 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1744 priv->status &= ~STATUS_HCMD_ACTIVE;
1745 wake_up_interruptible(&priv->wait_command_queue);
1746 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1747 }
1748
1749 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1750 IPW_DEBUG_TX("TX_QUEUE_1\n");
1751 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1752 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1753 }
1754
1755 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1756 IPW_DEBUG_TX("TX_QUEUE_2\n");
1757 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1758 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1759 }
1760
1761 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1762 IPW_DEBUG_TX("TX_QUEUE_3\n");
1763 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1764 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1765 }
1766
1767 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1768 IPW_DEBUG_TX("TX_QUEUE_4\n");
1769 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1770 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1771 }
1772
1773 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1774 IPW_WARNING("STATUS_CHANGE\n");
1775 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1776 }
1777
1778 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1779 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1780 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1781 }
1782
1783 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1784 IPW_WARNING("HOST_CMD_DONE\n");
1785 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1786 }
1787
1788 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1789 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1790 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1791 }
1792
1793 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1794 IPW_WARNING("PHY_OFF_DONE\n");
1795 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1796 }
1797
1798 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1799 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1800 priv->status |= STATUS_RF_KILL_HW;
1801 wake_up_interruptible(&priv->wait_command_queue);
1802 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1803 cancel_delayed_work(&priv->request_scan);
1804 schedule_work(&priv->link_down);
1805 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1806 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1807 }
1808
1809 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1810 IPW_ERROR("Firmware error detected. Restarting.\n");
1811 if (priv->error) {
1812 IPW_ERROR("Sysfs 'error' log already exists.\n");
1813 #ifdef CONFIG_IPW2200_DEBUG
1814 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1815 struct ipw_fw_error *error =
1816 ipw_alloc_error_log(priv);
1817 ipw_dump_error_log(priv, error);
1818 if (error)
1819 ipw_free_error_log(error);
1820 }
1821 #endif
1822 } else {
1823 priv->error = ipw_alloc_error_log(priv);
1824 if (priv->error)
1825 IPW_ERROR("Sysfs 'error' log captured.\n");
1826 else
1827 IPW_ERROR("Error allocating sysfs 'error' "
1828 "log.\n");
1829 #ifdef CONFIG_IPW2200_DEBUG
1830 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1831 ipw_dump_error_log(priv, priv->error);
1832 #endif
1833 }
1834
1835 /* XXX: If hardware encryption is for WPA/WPA2,
1836 * we have to notify the supplicant. */
1837 if (priv->ieee->sec.encrypt) {
1838 priv->status &= ~STATUS_ASSOCIATED;
1839 notify_wx_assoc_event(priv);
1840 }
1841
1842 /* Keep the restart process from trying to send host
1843 * commands by clearing the INIT status bit */
1844 priv->status &= ~STATUS_INIT;
1845
1846 /* Cancel currently queued command. */
1847 priv->status &= ~STATUS_HCMD_ACTIVE;
1848 wake_up_interruptible(&priv->wait_command_queue);
1849
1850 queue_work(priv->workqueue, &priv->adapter_restart);
1851 handled |= IPW_INTA_BIT_FATAL_ERROR;
1852 }
1853
1854 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1855 IPW_ERROR("Parity error\n");
1856 handled |= IPW_INTA_BIT_PARITY_ERROR;
1857 }
1858
1859 if (handled != inta) {
1860 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1861 }
1862
1863 /* enable all interrupts */
1864 ipw_enable_interrupts(priv);
1865
1866 spin_unlock_irqrestore(&priv->lock, flags);
1867 }
1868
1869 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1870 static char *get_cmd_string(u8 cmd)
1871 {
1872 switch (cmd) {
1873 IPW_CMD(HOST_COMPLETE);
1874 IPW_CMD(POWER_DOWN);
1875 IPW_CMD(SYSTEM_CONFIG);
1876 IPW_CMD(MULTICAST_ADDRESS);
1877 IPW_CMD(SSID);
1878 IPW_CMD(ADAPTER_ADDRESS);
1879 IPW_CMD(PORT_TYPE);
1880 IPW_CMD(RTS_THRESHOLD);
1881 IPW_CMD(FRAG_THRESHOLD);
1882 IPW_CMD(POWER_MODE);
1883 IPW_CMD(WEP_KEY);
1884 IPW_CMD(TGI_TX_KEY);
1885 IPW_CMD(SCAN_REQUEST);
1886 IPW_CMD(SCAN_REQUEST_EXT);
1887 IPW_CMD(ASSOCIATE);
1888 IPW_CMD(SUPPORTED_RATES);
1889 IPW_CMD(SCAN_ABORT);
1890 IPW_CMD(TX_FLUSH);
1891 IPW_CMD(QOS_PARAMETERS);
1892 IPW_CMD(DINO_CONFIG);
1893 IPW_CMD(RSN_CAPABILITIES);
1894 IPW_CMD(RX_KEY);
1895 IPW_CMD(CARD_DISABLE);
1896 IPW_CMD(SEED_NUMBER);
1897 IPW_CMD(TX_POWER);
1898 IPW_CMD(COUNTRY_INFO);
1899 IPW_CMD(AIRONET_INFO);
1900 IPW_CMD(AP_TX_POWER);
1901 IPW_CMD(CCKM_INFO);
1902 IPW_CMD(CCX_VER_INFO);
1903 IPW_CMD(SET_CALIBRATION);
1904 IPW_CMD(SENSITIVITY_CALIB);
1905 IPW_CMD(RETRY_LIMIT);
1906 IPW_CMD(IPW_PRE_POWER_DOWN);
1907 IPW_CMD(VAP_BEACON_TEMPLATE);
1908 IPW_CMD(VAP_DTIM_PERIOD);
1909 IPW_CMD(EXT_SUPPORTED_RATES);
1910 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1911 IPW_CMD(VAP_QUIET_INTERVALS);
1912 IPW_CMD(VAP_CHANNEL_SWITCH);
1913 IPW_CMD(VAP_MANDATORY_CHANNELS);
1914 IPW_CMD(VAP_CELL_PWR_LIMIT);
1915 IPW_CMD(VAP_CF_PARAM_SET);
1916 IPW_CMD(VAP_SET_BEACONING_STATE);
1917 IPW_CMD(MEASUREMENT);
1918 IPW_CMD(POWER_CAPABILITY);
1919 IPW_CMD(SUPPORTED_CHANNELS);
1920 IPW_CMD(TPC_REPORT);
1921 IPW_CMD(WME_INFO);
1922 IPW_CMD(PRODUCTION_COMMAND);
1923 default:
1924 return "UNKNOWN";
1925 }
1926 }
1927
1928 #define HOST_COMPLETE_TIMEOUT HZ
1929 static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1930 {
1931 int rc = 0;
1932 unsigned long flags;
1933
1934 spin_lock_irqsave(&priv->lock, flags);
1935 if (priv->status & STATUS_HCMD_ACTIVE) {
1936 IPW_ERROR("Failed to send %s: Already sending a command.\n",
1937 get_cmd_string(cmd->cmd));
1938 spin_unlock_irqrestore(&priv->lock, flags);
1939 return -EAGAIN;
1940 }
1941
1942 priv->status |= STATUS_HCMD_ACTIVE;
1943
1944 if (priv->cmdlog) {
1945 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
1946 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
1947 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
1948 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
1949 cmd->len);
1950 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
1951 }
1952
1953 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1954 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1955 priv->status);
1956
1957 #ifndef DEBUG_CMD_WEP_KEY
1958 if (cmd->cmd == IPW_CMD_WEP_KEY)
1959 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
1960 else
1961 #endif
1962 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1963
1964
1965 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1966 if (rc) {
1967 priv->status &= ~STATUS_HCMD_ACTIVE;
1968 IPW_ERROR("Failed to send %s: Reason %d\n",
1969 get_cmd_string(cmd->cmd), rc);
1970 spin_unlock_irqrestore(&priv->lock, flags);
1971 goto exit;
1972 }
1973 spin_unlock_irqrestore(&priv->lock, flags);
1974
1975 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1976 					      !(priv->status &
1977 						STATUS_HCMD_ACTIVE),
1978 HOST_COMPLETE_TIMEOUT);
1979 if (rc == 0) {
1980 spin_lock_irqsave(&priv->lock, flags);
1981 if (priv->status & STATUS_HCMD_ACTIVE) {
1982 IPW_ERROR("Failed to send %s: Command timed out.\n",
1983 get_cmd_string(cmd->cmd));
1984 priv->status &= ~STATUS_HCMD_ACTIVE;
1985 spin_unlock_irqrestore(&priv->lock, flags);
1986 rc = -EIO;
1987 goto exit;
1988 }
1989 spin_unlock_irqrestore(&priv->lock, flags);
1990 } else
1991 rc = 0;
1992
1993 if (priv->status & STATUS_RF_KILL_HW) {
1994 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
1995 get_cmd_string(cmd->cmd));
1996 rc = -EIO;
1997 goto exit;
1998 }
1999
2000 exit:
2001 if (priv->cmdlog) {
2002 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2003 priv->cmdlog_pos %= priv->cmdlog_len;
2004 }
2005 return rc;
2006 }
2007
2008 static int ipw_send_host_complete(struct ipw_priv *priv)
2009 {
2010 struct host_cmd cmd = {
2011 .cmd = IPW_CMD_HOST_COMPLETE,
2012 .len = 0
2013 };
2014
2015 if (!priv) {
2016 IPW_ERROR("Invalid args\n");
2017 return -1;
2018 }
2019
2020 return ipw_send_cmd(priv, &cmd);
2021 }
2022
2023 static int ipw_send_system_config(struct ipw_priv *priv,
2024 struct ipw_sys_config *config)
2025 {
2026 struct host_cmd cmd = {
2027 .cmd = IPW_CMD_SYSTEM_CONFIG,
2028 .len = sizeof(*config)
2029 };
2030
2031 if (!priv || !config) {
2032 IPW_ERROR("Invalid args\n");
2033 return -1;
2034 }
2035
2036 memcpy(cmd.param, config, sizeof(*config));
2037 return ipw_send_cmd(priv, &cmd);
2038 }
2039
2040 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2041 {
2042 struct host_cmd cmd = {
2043 .cmd = IPW_CMD_SSID,
2044 .len = min(len, IW_ESSID_MAX_SIZE)
2045 };
2046
2047 if (!priv || !ssid) {
2048 IPW_ERROR("Invalid args\n");
2049 return -1;
2050 }
2051
2052 memcpy(cmd.param, ssid, cmd.len);
2053 return ipw_send_cmd(priv, &cmd);
2054 }
2055
2056 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2057 {
2058 struct host_cmd cmd = {
2059 .cmd = IPW_CMD_ADAPTER_ADDRESS,
2060 .len = ETH_ALEN
2061 };
2062
2063 if (!priv || !mac) {
2064 IPW_ERROR("Invalid args\n");
2065 return -1;
2066 }
2067
2068 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2069 priv->net_dev->name, MAC_ARG(mac));
2070
2071 memcpy(cmd.param, mac, ETH_ALEN);
2072 return ipw_send_cmd(priv, &cmd);
2073 }
2074
2075 /*
2076 * NOTE: This must be executed from our workqueue as it results in udelay
2077 * being called which may corrupt the keyboard if executed on default
2078 * workqueue
2079 */
2080 static void ipw_adapter_restart(void *adapter)
2081 {
2082 struct ipw_priv *priv = adapter;
2083
2084 if (priv->status & STATUS_RF_KILL_MASK)
2085 return;
2086
2087 ipw_down(priv);
2088
2089 if (priv->assoc_network &&
2090 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2091 ipw_remove_current_network(priv);
2092
2093 if (ipw_up(priv)) {
2094 IPW_ERROR("Failed to up device\n");
2095 return;
2096 }
2097 }
2098
2099 static void ipw_bg_adapter_restart(void *data)
2100 {
2101 struct ipw_priv *priv = data;
2102 down(&priv->sem);
2103 ipw_adapter_restart(data);
2104 up(&priv->sem);
2105 }
2106
2107 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2108
2109 static void ipw_scan_check(void *data)
2110 {
2111 struct ipw_priv *priv = data;
2112 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2113 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2114 "adapter (%dms).\n",
2115 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2116 queue_work(priv->workqueue, &priv->adapter_restart);
2117 }
2118 }
2119
2120 static void ipw_bg_scan_check(void *data)
2121 {
2122 struct ipw_priv *priv = data;
2123 down(&priv->sem);
2124 ipw_scan_check(data);
2125 up(&priv->sem);
2126 }
2127
2128 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2129 struct ipw_scan_request_ext *request)
2130 {
2131 struct host_cmd cmd = {
2132 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
2133 .len = sizeof(*request)
2134 };
2135
2136 memcpy(cmd.param, request, sizeof(*request));
2137 return ipw_send_cmd(priv, &cmd);
2138 }
2139
2140 static int ipw_send_scan_abort(struct ipw_priv *priv)
2141 {
2142 struct host_cmd cmd = {
2143 .cmd = IPW_CMD_SCAN_ABORT,
2144 .len = 0
2145 };
2146
2147 if (!priv) {
2148 IPW_ERROR("Invalid args\n");
2149 return -1;
2150 }
2151
2152 return ipw_send_cmd(priv, &cmd);
2153 }
2154
2155 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2156 {
2157 struct host_cmd cmd = {
2158 .cmd = IPW_CMD_SENSITIVITY_CALIB,
2159 .len = sizeof(struct ipw_sensitivity_calib)
2160 };
2161 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
2162 &cmd.param;
2163 calib->beacon_rssi_raw = sens;
2164 return ipw_send_cmd(priv, &cmd);
2165 }
2166
2167 static int ipw_send_associate(struct ipw_priv *priv,
2168 struct ipw_associate *associate)
2169 {
2170 struct host_cmd cmd = {
2171 .cmd = IPW_CMD_ASSOCIATE,
2172 .len = sizeof(*associate)
2173 };
2174
2175 	struct ipw_associate tmp_associate;
2176 	if (!priv || !associate) {
2177 		IPW_ERROR("Invalid args\n");
2178 		return -1;
2179 	}
2180 
2181 	memcpy(&tmp_associate, associate, sizeof(*associate));
2182 	tmp_associate.policy_support =
2183 	    cpu_to_le16(tmp_associate.policy_support);
2184 	tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2185 	tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2186 	tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2187 	tmp_associate.listen_interval =
2188 	    cpu_to_le16(tmp_associate.listen_interval);
2189 	tmp_associate.beacon_interval =
2190 	    cpu_to_le16(tmp_associate.beacon_interval);
2191 	tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2192
2193 memcpy(cmd.param, &tmp_associate, sizeof(*associate));
2194 return ipw_send_cmd(priv, &cmd);
2195 }
2196
2197 static int ipw_send_supported_rates(struct ipw_priv *priv,
2198 struct ipw_supported_rates *rates)
2199 {
2200 struct host_cmd cmd = {
2201 .cmd = IPW_CMD_SUPPORTED_RATES,
2202 .len = sizeof(*rates)
2203 };
2204
2205 if (!priv || !rates) {
2206 IPW_ERROR("Invalid args\n");
2207 return -1;
2208 }
2209
2210 memcpy(cmd.param, rates, sizeof(*rates));
2211 return ipw_send_cmd(priv, &cmd);
2212 }
2213
2214 static int ipw_set_random_seed(struct ipw_priv *priv)
2215 {
2216 struct host_cmd cmd = {
2217 .cmd = IPW_CMD_SEED_NUMBER,
2218 .len = sizeof(u32)
2219 };
2220
2221 if (!priv) {
2222 IPW_ERROR("Invalid args\n");
2223 return -1;
2224 }
2225
2226 get_random_bytes(&cmd.param, sizeof(u32));
2227
2228 return ipw_send_cmd(priv, &cmd);
2229 }
2230
2231 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2232 {
2233 struct host_cmd cmd = {
2234 .cmd = IPW_CMD_CARD_DISABLE,
2235 .len = sizeof(u32)
2236 };
2237
2238 if (!priv) {
2239 IPW_ERROR("Invalid args\n");
2240 return -1;
2241 }
2242
2243 *((u32 *) & cmd.param) = phy_off;
2244
2245 return ipw_send_cmd(priv, &cmd);
2246 }
2247
2248 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2249 {
2250 struct host_cmd cmd = {
2251 .cmd = IPW_CMD_TX_POWER,
2252 .len = sizeof(*power)
2253 };
2254
2255 if (!priv || !power) {
2256 IPW_ERROR("Invalid args\n");
2257 return -1;
2258 }
2259
2260 memcpy(cmd.param, power, sizeof(*power));
2261 return ipw_send_cmd(priv, &cmd);
2262 }
2263
2264 static int ipw_set_tx_power(struct ipw_priv *priv)
2265 {
2266 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
2267 struct ipw_tx_power tx_power;
2268 s8 max_power;
2269 int i;
2270
2271 memset(&tx_power, 0, sizeof(tx_power));
2272
2273 /* configure device for 'G' band */
2274 tx_power.ieee_mode = IPW_G_MODE;
2275 tx_power.num_channels = geo->bg_channels;
2276 for (i = 0; i < geo->bg_channels; i++) {
2277 max_power = geo->bg[i].max_power;
2278 tx_power.channels_tx_power[i].channel_number =
2279 geo->bg[i].channel;
2280 tx_power.channels_tx_power[i].tx_power = max_power ?
2281 min(max_power, priv->tx_power) : priv->tx_power;
2282 }
2283 if (ipw_send_tx_power(priv, &tx_power))
2284 return -EIO;
2285
2286 /* configure device to also handle 'B' band */
2287 tx_power.ieee_mode = IPW_B_MODE;
2288 if (ipw_send_tx_power(priv, &tx_power))
2289 return -EIO;
2290
2291 /* configure device to also handle 'A' band */
2292 if (priv->ieee->abg_true) {
2293 tx_power.ieee_mode = IPW_A_MODE;
2294 tx_power.num_channels = geo->a_channels;
2295 for (i = 0; i < tx_power.num_channels; i++) {
2296 max_power = geo->a[i].max_power;
2297 tx_power.channels_tx_power[i].channel_number =
2298 geo->a[i].channel;
2299 tx_power.channels_tx_power[i].tx_power = max_power ?
2300 min(max_power, priv->tx_power) : priv->tx_power;
2301 }
2302 if (ipw_send_tx_power(priv, &tx_power))
2303 return -EIO;
2304 }
2305 return 0;
2306 }
2307
2308 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2309 {
2310 struct ipw_rts_threshold rts_threshold = {
2311 .rts_threshold = rts,
2312 };
2313 struct host_cmd cmd = {
2314 .cmd = IPW_CMD_RTS_THRESHOLD,
2315 .len = sizeof(rts_threshold)
2316 };
2317
2318 if (!priv) {
2319 IPW_ERROR("Invalid args\n");
2320 return -1;
2321 }
2322
2323 memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold));
2324 return ipw_send_cmd(priv, &cmd);
2325 }
2326
2327 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2328 {
2329 struct ipw_frag_threshold frag_threshold = {
2330 .frag_threshold = frag,
2331 };
2332 struct host_cmd cmd = {
2333 .cmd = IPW_CMD_FRAG_THRESHOLD,
2334 .len = sizeof(frag_threshold)
2335 };
2336
2337 if (!priv) {
2338 IPW_ERROR("Invalid args\n");
2339 return -1;
2340 }
2341
2342 memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold));
2343 return ipw_send_cmd(priv, &cmd);
2344 }
2345
2346 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2347 {
2348 struct host_cmd cmd = {
2349 .cmd = IPW_CMD_POWER_MODE,
2350 .len = sizeof(u32)
2351 };
2352 u32 *param = (u32 *) (&cmd.param);
2353
2354 if (!priv) {
2355 IPW_ERROR("Invalid args\n");
2356 return -1;
2357 }
2358
2359 /* If on battery, set to 3, if AC set to CAM, else user
2360 * level */
2361 switch (mode) {
2362 case IPW_POWER_BATTERY:
2363 *param = IPW_POWER_INDEX_3;
2364 break;
2365 case IPW_POWER_AC:
2366 *param = IPW_POWER_MODE_CAM;
2367 break;
2368 default:
2369 *param = mode;
2370 break;
2371 }
2372
2373 return ipw_send_cmd(priv, &cmd);
2374 }
2375
2376 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2377 {
2378 struct ipw_retry_limit retry_limit = {
2379 .short_retry_limit = slimit,
2380 .long_retry_limit = llimit
2381 };
2382 struct host_cmd cmd = {
2383 .cmd = IPW_CMD_RETRY_LIMIT,
2384 .len = sizeof(retry_limit)
2385 };
2386
2387 if (!priv) {
2388 IPW_ERROR("Invalid args\n");
2389 return -1;
2390 }
2391
2392 memcpy(cmd.param, &retry_limit, sizeof(retry_limit));
2393 return ipw_send_cmd(priv, &cmd);
2394 }
2395
2396 /*
2397 * The IPW device contains a Microwire compatible EEPROM that stores
2398 * various data like the MAC address. Usually the firmware has exclusive
2399 * access to the eeprom, but during device initialization (before the
2400 * device driver has sent the HostComplete command to the firmware) the
2401 * device driver has read access to the EEPROM by way of indirect addressing
2402 * through a couple of memory mapped registers.
2403 *
2404  * The following is a simplified implementation for pulling data out of
2405  * the eeprom, along with some helper functions to find information in
2406  * the per device private data's copy of the eeprom.
2407  *
2408  * NOTE: To better understand how these functions work (i.e. what is a chip
2409  * select and why do we have to keep driving the eeprom clock?), read
2410  * just about any data sheet for a Microwire compatible EEPROM.
2411 */
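
/*
 * Illustrative sketch (not compiled into the driver): how a single 16-bit
 * word could be pulled straight off the Microwire interface with the
 * helpers below.  eeprom_read_u16() asserts chip select, clocks out the
 * READ opcode plus an 8-bit word address through eeprom_op(), then clocks
 * in 16 data bits MSB first.  The word offset 0x10 and the helper name are
 * hypothetical, used here for illustration only.
 *
 *	static void example_dump_eeprom_word(struct ipw_priv *priv)
 *	{
 *		u16 w = eeprom_read_u16(priv, 0x10);
 *
 *		IPW_DEBUG_INFO("eeprom word 0x10 = 0x%04x\n", w);
 *	}
 */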
2412
2413 /* write a 32 bit value into the indirect accessor register */
2414 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2415 {
2416 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2417
2418 /* the eeprom requires some time to complete the operation */
2419 udelay(p->eeprom_delay);
2420
2421 return;
2422 }
2423
2424 /* perform a chip select operation */
2425 static void eeprom_cs(struct ipw_priv *priv)
2426 {
2427 eeprom_write_reg(priv, 0);
2428 eeprom_write_reg(priv, EEPROM_BIT_CS);
2429 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2430 eeprom_write_reg(priv, EEPROM_BIT_CS);
2431 }
2432
2433 /* release (de-assert) the eeprom chip select */
2434 static void eeprom_disable_cs(struct ipw_priv *priv)
2435 {
2436 eeprom_write_reg(priv, EEPROM_BIT_CS);
2437 eeprom_write_reg(priv, 0);
2438 eeprom_write_reg(priv, EEPROM_BIT_SK);
2439 }
2440
2441 /* push a single bit down to the eeprom */
2442 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2443 {
2444 int d = (bit ? EEPROM_BIT_DI : 0);
2445 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2446 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2447 }
2448
2449 /* push an opcode followed by an address down to the eeprom */
2450 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2451 {
2452 int i;
2453
2454 eeprom_cs(priv);
2455 eeprom_write_bit(priv, 1);
2456 eeprom_write_bit(priv, op & 2);
2457 eeprom_write_bit(priv, op & 1);
2458 for (i = 7; i >= 0; i--) {
2459 eeprom_write_bit(priv, addr & (1 << i));
2460 }
2461 }
2462
2463 /* pull 16 bits off the eeprom, one bit at a time */
2464 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2465 {
2466 int i;
2467 u16 r = 0;
2468
2469 /* Send READ Opcode */
2470 eeprom_op(priv, EEPROM_CMD_READ, addr);
2471
2472 /* Send dummy bit */
2473 eeprom_write_reg(priv, EEPROM_BIT_CS);
2474
2475 /* Read the byte off the eeprom one bit at a time */
2476 for (i = 0; i < 16; i++) {
2477 u32 data = 0;
2478 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2479 eeprom_write_reg(priv, EEPROM_BIT_CS);
2480 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2481 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2482 }
2483
2484 /* Send another dummy bit */
2485 eeprom_write_reg(priv, 0);
2486 eeprom_disable_cs(priv);
2487
2488 return r;
2489 }
2490
2491 /* helper function for pulling the mac address out of the private */
2492 /* data's copy of the eeprom data */
2493 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2494 {
2495 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2496 }
2497
2498 /*
2499 * Either the device driver (i.e. the host) or the firmware can
2500 * load eeprom data into the designated region in SRAM. If neither
2501 * happens then the FW will shutdown with a fatal error.
2502 *
2503 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2504  * bit in the designated region of shared SRAM needs to be non-zero.
2505 */
2506 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2507 {
2508 int i;
2509 u16 *eeprom = (u16 *) priv->eeprom;
2510
2511 IPW_DEBUG_TRACE(">>\n");
2512
2513 /* read entire contents of eeprom into private buffer */
2514 for (i = 0; i < 128; i++)
2515 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2516
2517 /*
2518 If the data looks correct, then copy it to our private
2519 copy. Otherwise let the firmware know to perform the operation
2520 	   on its own.
2521 */
2522 	if (priv->eeprom[EEPROM_VERSION] != 0) {
2523 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2524
2525 /* write the eeprom data to sram */
2526 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2527 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2528
2529 /* Do not load eeprom data on fatal error or suspend */
2530 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2531 } else {
2532 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2533
2534 /* Load eeprom data on fatal error or suspend */
2535 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2536 }
2537
2538 IPW_DEBUG_TRACE("<<\n");
2539 }
2540
2541 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2542 {
2543 count >>= 2;
2544 if (!count)
2545 return;
2546 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2547 while (count--)
2548 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2549 }
2550
2551 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2552 {
2553 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2554 CB_NUMBER_OF_ELEMENTS_SMALL *
2555 sizeof(struct command_block));
2556 }
2557
2558 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2559 { /* start dma engine but no transfers yet */
2560
2561 IPW_DEBUG_FW(">> : \n");
2562
2563 /* Start the dma */
2564 ipw_fw_dma_reset_command_blocks(priv);
2565
2566 /* Write CB base address */
2567 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2568
2569 IPW_DEBUG_FW("<< : \n");
2570 return 0;
2571 }
2572
2573 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2574 {
2575 u32 control = 0;
2576
2577 IPW_DEBUG_FW(">> :\n");
2578
2579 //set the Stop and Abort bit
2580 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2581 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2582 priv->sram_desc.last_cb_index = 0;
2583
2584 IPW_DEBUG_FW("<< \n");
2585 }
2586
2587 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2588 struct command_block *cb)
2589 {
2590 u32 address =
2591 IPW_SHARED_SRAM_DMA_CONTROL +
2592 (sizeof(struct command_block) * index);
2593 IPW_DEBUG_FW(">> :\n");
2594
2595 ipw_write_indirect(priv, address, (u8 *) cb,
2596 (int)sizeof(struct command_block));
2597
2598 IPW_DEBUG_FW("<< :\n");
2599 return 0;
2600
2601 }
2602
2603 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2604 {
2605 u32 control = 0;
2606 u32 index = 0;
2607
2608 IPW_DEBUG_FW(">> :\n");
2609
2610 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2611 ipw_fw_dma_write_command_block(priv, index,
2612 &priv->sram_desc.cb_list[index]);
2613
2614 /* Enable the DMA in the CSR register */
2615 ipw_clear_bit(priv, IPW_RESET_REG,
2616 IPW_RESET_REG_MASTER_DISABLED |
2617 IPW_RESET_REG_STOP_MASTER);
2618
2619 /* Set the Start bit. */
2620 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2621 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2622
2623 IPW_DEBUG_FW("<< :\n");
2624 return 0;
2625 }
2626
2627 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2628 {
2629 u32 address;
2630 u32 register_value = 0;
2631 u32 cb_fields_address = 0;
2632
2633 IPW_DEBUG_FW(">> :\n");
2634 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2635 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2636
2637 	/* Read the DMA Control register */
2638 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2639 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2640
2641 /* Print the CB values */
2642 cb_fields_address = address;
2643 register_value = ipw_read_reg32(priv, cb_fields_address);
2644 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2645
2646 cb_fields_address += sizeof(u32);
2647 register_value = ipw_read_reg32(priv, cb_fields_address);
2648 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2649
2650 cb_fields_address += sizeof(u32);
2651 register_value = ipw_read_reg32(priv, cb_fields_address);
2652 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2653 register_value);
2654
2655 cb_fields_address += sizeof(u32);
2656 register_value = ipw_read_reg32(priv, cb_fields_address);
2657 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2658
2659 IPW_DEBUG_FW(">> :\n");
2660 }
2661
2662 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2663 {
2664 u32 current_cb_address = 0;
2665 u32 current_cb_index = 0;
2666
2667 IPW_DEBUG_FW("<< :\n");
2668 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2669
2670 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2671 sizeof(struct command_block);
2672
2673 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2674 current_cb_index, current_cb_address);
2675
2676 IPW_DEBUG_FW(">> :\n");
2677 return current_cb_index;
2678
2679 }
2680
2681 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2682 u32 src_address,
2683 u32 dest_address,
2684 u32 length,
2685 int interrupt_enabled, int is_last)
2686 {
2687
2688 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2689 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2690 CB_DEST_SIZE_LONG;
2691 struct command_block *cb;
2692 u32 last_cb_element = 0;
2693
2694 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2695 src_address, dest_address, length);
2696
2697 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2698 return -1;
2699
2700 last_cb_element = priv->sram_desc.last_cb_index;
2701 cb = &priv->sram_desc.cb_list[last_cb_element];
2702 priv->sram_desc.last_cb_index++;
2703
2704 /* Calculate the new CB control word */
2705 if (interrupt_enabled)
2706 control |= CB_INT_ENABLED;
2707
2708 if (is_last)
2709 control |= CB_LAST_VALID;
2710
2711 control |= length;
2712
2713 /* Calculate the CB Element's checksum value */
2714 cb->status = control ^ src_address ^ dest_address;
2715
2716 /* Copy the Source and Destination addresses */
2717 cb->dest_addr = dest_address;
2718 cb->source_addr = src_address;
2719
2720 /* Copy the Control Word last */
2721 cb->control = control;
2722
2723 return 0;
2724 }
2725
2726 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2727 u32 src_phys, u32 dest_address, u32 length)
2728 {
2729 u32 bytes_left = length;
2730 u32 src_offset = 0;
2731 u32 dest_offset = 0;
2732 int status = 0;
2733 IPW_DEBUG_FW(">> \n");
2734 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2735 src_phys, dest_address, length);
2736 while (bytes_left > CB_MAX_LENGTH) {
2737 status = ipw_fw_dma_add_command_block(priv,
2738 src_phys + src_offset,
2739 dest_address +
2740 dest_offset,
2741 CB_MAX_LENGTH, 0, 0);
2742 if (status) {
2743 IPW_DEBUG_FW_INFO(": Failed\n");
2744 return -1;
2745 } else
2746 IPW_DEBUG_FW_INFO(": Added new cb\n");
2747
2748 src_offset += CB_MAX_LENGTH;
2749 dest_offset += CB_MAX_LENGTH;
2750 bytes_left -= CB_MAX_LENGTH;
2751 }
2752
2753 /* add the buffer tail */
2754 if (bytes_left > 0) {
2755 status =
2756 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2757 dest_address + dest_offset,
2758 bytes_left, 0, 0);
2759 if (status) {
2760 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2761 return -1;
2762 } else
2763 IPW_DEBUG_FW_INFO
2764 (": Adding new cb - the buffer tail\n");
2765 }
2766
2767 IPW_DEBUG_FW("<< \n");
2768 return 0;
2769 }
2770
2771 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2772 {
2773 u32 current_index = 0;
2774 u32 watchdog = 0;
2775
2776 IPW_DEBUG_FW(">> : \n");
2777
2778 current_index = ipw_fw_dma_command_block_index(priv);
2779 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
2780 (int)priv->sram_desc.last_cb_index);
2781
2782 while (current_index < priv->sram_desc.last_cb_index) {
2783 udelay(50);
2784 current_index = ipw_fw_dma_command_block_index(priv);
2785
2786 watchdog++;
2787
2788 if (watchdog > 400) {
2789 IPW_DEBUG_FW_INFO("Timeout\n");
2790 ipw_fw_dma_dump_command_block(priv);
2791 ipw_fw_dma_abort(priv);
2792 return -1;
2793 }
2794 }
2795
2796 ipw_fw_dma_abort(priv);
2797
2798 /*Disable the DMA in the CSR register */
2799 ipw_set_bit(priv, IPW_RESET_REG,
2800 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2801
2802 IPW_DEBUG_FW("<< dmaWaitSync \n");
2803 return 0;
2804 }
2805
2806 static void ipw_remove_current_network(struct ipw_priv *priv)
2807 {
2808 struct list_head *element, *safe;
2809 struct ieee80211_network *network = NULL;
2810 unsigned long flags;
2811
2812 spin_lock_irqsave(&priv->ieee->lock, flags);
2813 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2814 network = list_entry(element, struct ieee80211_network, list);
2815 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2816 list_del(element);
2817 list_add_tail(&network->list,
2818 &priv->ieee->network_free_list);
2819 }
2820 }
2821 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2822 }
2823
2824 /**
2825 * Check that card is still alive.
2826 * Reads debug register from domain0.
2827 * If card is present, pre-defined value should
2828 * be found there.
2829 *
2830 * @param priv
2831 * @return 1 if card is present, 0 otherwise
2832 */
2833 static inline int ipw_alive(struct ipw_priv *priv)
2834 {
2835 return ipw_read32(priv, 0x90) == 0xd55555d5;
2836 }
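
/*
 * Illustrative sketch (not part of the driver): bounding a wait on
 * ipw_alive() instead of sampling it once.  The 100 ms budget and the
 * helper name are arbitrary and hypothetical.
 *
 *	static int example_wait_alive(struct ipw_priv *priv)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 100; i++) {
 *			if (ipw_alive(priv))
 *				return 0;
 *			mdelay(1);
 *		}
 *		return -ETIME;
 *	}
 */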
2837
2838 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2839 int timeout)
2840 {
2841 int i = 0;
2842
2843 do {
2844 if ((ipw_read32(priv, addr) & mask) == mask)
2845 return i;
2846 mdelay(10);
2847 i += 10;
2848 } while (i < timeout);
2849
2850 return -ETIME;
2851 }
2852
2853 /* These functions load the firmware and microcode for the operation of
2854  * the ipw hardware.  They assume the buffer has all the bits for the
2855  * image and that the caller is handling the memory allocation and clean up.
2856 */
2857
2858 static int ipw_stop_master(struct ipw_priv *priv)
2859 {
2860 int rc;
2861
2862 IPW_DEBUG_TRACE(">> \n");
2863 /* stop master. typical delay - 0 */
2864 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2865
2866 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2867 IPW_RESET_REG_MASTER_DISABLED, 100);
2868 if (rc < 0) {
2869 IPW_ERROR("stop master failed in 10ms\n");
2870 return -1;
2871 }
2872
2873 IPW_DEBUG_INFO("stop master %dms\n", rc);
2874
2875 return rc;
2876 }
2877
2878 static void ipw_arc_release(struct ipw_priv *priv)
2879 {
2880 IPW_DEBUG_TRACE(">> \n");
2881 mdelay(5);
2882
2883 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2884
2885 	/* no one knows the timing; for safety add some delay */
2886 mdelay(5);
2887 }
2888
2889 struct fw_header {
2890 u32 version;
2891 u32 mode;
2892 };
2893
2894 struct fw_chunk {
2895 u32 address;
2896 u32 length;
2897 };
2898
2899 #define IPW_FW_MAJOR_VERSION 2
2900 #define IPW_FW_MINOR_VERSION 4
2901
2902 #define IPW_FW_MINOR(x) (((x) >> 8) & 0xff)
2903 #define IPW_FW_MAJOR(x) ((x) & 0xff)
2904
2905 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION)
2906
2907 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2908 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2909
2910 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2911 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2912 #else
2913 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2914 #endif
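
/*
 * Worked example (informational only): with the values above,
 * IPW_FW_VERSION = (4 << 8) | 2 = 0x0402, so IPW_FW_MAJOR(0x0402) == 2 and
 * IPW_FW_MINOR(0x0402) == 4, matching the header check in ipw_get_fw().
 * IPW_FW_NAME("bss") likewise expands to "ipw-2.4-bss.fw" with these
 * version numbers.
 */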
2915
2916 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2917 {
2918 int rc = 0, i, addr;
2919 u8 cr = 0;
2920 u16 *image;
2921
2922 image = (u16 *) data;
2923
2924 IPW_DEBUG_TRACE(">> \n");
2925
2926 rc = ipw_stop_master(priv);
2927
2928 if (rc < 0)
2929 return rc;
2930
2931 // spin_lock_irqsave(&priv->lock, flags);
2932
2933 for (addr = IPW_SHARED_LOWER_BOUND;
2934 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2935 ipw_write32(priv, addr, 0);
2936 }
2937
2938 /* no ucode (yet) */
2939 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2940 /* destroy DMA queues */
2941 /* reset sequence */
2942
2943 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
2944 ipw_arc_release(priv);
2945 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
2946 mdelay(1);
2947
2948 /* reset PHY */
2949 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
2950 mdelay(1);
2951
2952 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
2953 mdelay(1);
2954
2955 /* enable ucode store */
2956 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
2957 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
2958 mdelay(1);
2959
2960 /* write ucode */
2961 /**
2962 * @bug
2963 * Do NOT set indirect address register once and then
2964 * store data to indirect data register in the loop.
2965 	 * It seems very reasonable, but in this case DINO does not
2966 	 * accept the ucode. It is essential to set the address each time.
2967 */
2968 /* load new ipw uCode */
2969 for (i = 0; i < len / 2; i++)
2970 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
2971 cpu_to_le16(image[i]));
2972
2973 /* enable DINO */
2974 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2975 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2976
2977 	/* this is where the igx / win driver deviates from the VAP driver. */
2978
2979 /* wait for alive response */
2980 for (i = 0; i < 100; i++) {
2981 /* poll for incoming data */
2982 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
2983 if (cr & DINO_RXFIFO_DATA)
2984 break;
2985 mdelay(1);
2986 }
2987
2988 if (cr & DINO_RXFIFO_DATA) {
2989 		/* alive_command_response size is NOT a multiple of 4 */
2990 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2991
2992 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2993 response_buffer[i] =
2994 le32_to_cpu(ipw_read_reg32(priv,
2995 IPW_BASEBAND_RX_FIFO_READ));
2996 memcpy(&priv->dino_alive, response_buffer,
2997 sizeof(priv->dino_alive));
2998 if (priv->dino_alive.alive_command == 1
2999 && priv->dino_alive.ucode_valid == 1) {
3000 rc = 0;
3001 IPW_DEBUG_INFO
3002 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3003 "of %02d/%02d/%02d %02d:%02d\n",
3004 priv->dino_alive.software_revision,
3005 priv->dino_alive.software_revision,
3006 priv->dino_alive.device_identifier,
3007 priv->dino_alive.device_identifier,
3008 priv->dino_alive.time_stamp[0],
3009 priv->dino_alive.time_stamp[1],
3010 priv->dino_alive.time_stamp[2],
3011 priv->dino_alive.time_stamp[3],
3012 priv->dino_alive.time_stamp[4]);
3013 } else {
3014 IPW_DEBUG_INFO("Microcode is not alive\n");
3015 rc = -EINVAL;
3016 }
3017 } else {
3018 IPW_DEBUG_INFO("No alive response from DINO\n");
3019 rc = -ETIME;
3020 }
3021
3022 /* disable DINO, otherwise for some reason
3023 	   the firmware has problems getting the alive response. */
3024 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3025
3026 // spin_unlock_irqrestore(&priv->lock, flags);
3027
3028 return rc;
3029 }
3030
3031 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3032 {
3033 int rc = -1;
3034 int offset = 0;
3035 struct fw_chunk *chunk;
3036 dma_addr_t shared_phys;
3037 u8 *shared_virt;
3038
3039 IPW_DEBUG_TRACE("<< : \n");
3040 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3041
3042 if (!shared_virt)
3043 return -ENOMEM;
3044
3045 memmove(shared_virt, data, len);
3046
3047 /* Start the Dma */
3048 rc = ipw_fw_dma_enable(priv);
3049
3050 if (priv->sram_desc.last_cb_index > 0) {
3051 		/* the DMA is already ready; this would be a bug. */
3052 BUG();
3053 goto out;
3054 }
3055
3056 do {
3057 chunk = (struct fw_chunk *)(data + offset);
3058 offset += sizeof(struct fw_chunk);
3059 /* build DMA packet and queue up for sending */
3060 /* dma to chunk->address, the chunk->length bytes from data +
3061 		 * offset */
3062 /* Dma loading */
3063 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3064 le32_to_cpu(chunk->address),
3065 le32_to_cpu(chunk->length));
3066 if (rc) {
3067 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3068 goto out;
3069 }
3070
3071 offset += le32_to_cpu(chunk->length);
3072 } while (offset < len);
3073
3074 /* Run the DMA and wait for the answer */
3075 rc = ipw_fw_dma_kick(priv);
3076 if (rc) {
3077 IPW_ERROR("dmaKick Failed\n");
3078 goto out;
3079 }
3080
3081 rc = ipw_fw_dma_wait(priv);
3082 if (rc) {
3083 IPW_ERROR("dmaWaitSync Failed\n");
3084 goto out;
3085 }
3086 out:
3087 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3088 return rc;
3089 }
3090
3091 /* stop nic */
3092 static int ipw_stop_nic(struct ipw_priv *priv)
3093 {
3094 int rc = 0;
3095
3096 /* stop */
3097 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3098
3099 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3100 IPW_RESET_REG_MASTER_DISABLED, 500);
3101 if (rc < 0) {
3102 IPW_ERROR("wait for reg master disabled failed\n");
3103 return rc;
3104 }
3105
3106 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3107
3108 return rc;
3109 }
3110
3111 static void ipw_start_nic(struct ipw_priv *priv)
3112 {
3113 IPW_DEBUG_TRACE(">>\n");
3114
3115 /* prvHwStartNic release ARC */
3116 ipw_clear_bit(priv, IPW_RESET_REG,
3117 IPW_RESET_REG_MASTER_DISABLED |
3118 IPW_RESET_REG_STOP_MASTER |
3119 CBD_RESET_REG_PRINCETON_RESET);
3120
3121 /* enable power management */
3122 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3123 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3124
3125 IPW_DEBUG_TRACE("<<\n");
3126 }
3127
3128 static int ipw_init_nic(struct ipw_priv *priv)
3129 {
3130 int rc;
3131
3132 IPW_DEBUG_TRACE(">>\n");
3133 /* reset */
3134 /*prvHwInitNic */
3135 /* set "initialization complete" bit to move adapter to D0 state */
3136 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3137
3138 /* low-level PLL activation */
3139 ipw_write32(priv, IPW_READ_INT_REGISTER,
3140 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3141
3142 /* wait for clock stabilization */
3143 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3144 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3145 if (rc < 0)
3146 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3147
3148 /* assert SW reset */
3149 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3150
3151 udelay(10);
3152
3153 /* set "initialization complete" bit to move adapter to D0 state */
3154 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3155
3156 IPW_DEBUG_TRACE(">>\n");
3157 return 0;
3158 }
3159
3160 /* Call this function from process context; it will sleep in request_firmware.
3161 * Probe is an ok place to call this from.
3162 */
3163 static int ipw_reset_nic(struct ipw_priv *priv)
3164 {
3165 int rc = 0;
3166 unsigned long flags;
3167
3168 IPW_DEBUG_TRACE(">>\n");
3169
3170 rc = ipw_init_nic(priv);
3171
3172 spin_lock_irqsave(&priv->lock, flags);
3173 /* Clear the 'host command active' bit... */
3174 priv->status &= ~STATUS_HCMD_ACTIVE;
3175 wake_up_interruptible(&priv->wait_command_queue);
3176 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3177 wake_up_interruptible(&priv->wait_state);
3178 spin_unlock_irqrestore(&priv->lock, flags);
3179
3180 IPW_DEBUG_TRACE("<<\n");
3181 return rc;
3182 }
3183
3184 static int ipw_get_fw(struct ipw_priv *priv,
3185 const struct firmware **fw, const char *name)
3186 {
3187 struct fw_header *header;
3188 int rc;
3189
3190 /* ask firmware_class module to get the boot firmware off disk */
3191 rc = request_firmware(fw, name, &priv->pci_dev->dev);
3192 if (rc < 0) {
3193 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
3194 return rc;
3195 }
3196
3197 header = (struct fw_header *)(*fw)->data;
3198 if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) {
3199 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
3200 name,
3201 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3202 IPW_FW_MAJOR_VERSION);
3203 return -EINVAL;
3204 }
3205
3206 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
3207 name,
3208 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3209 IPW_FW_MINOR(le32_to_cpu(header->version)),
3210 (*fw)->size - sizeof(struct fw_header));
3211 return 0;
3212 }
3213
3214 #define IPW_RX_BUF_SIZE (3000)
3215
3216 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3217 struct ipw_rx_queue *rxq)
3218 {
3219 unsigned long flags;
3220 int i;
3221
3222 spin_lock_irqsave(&rxq->lock, flags);
3223
3224 INIT_LIST_HEAD(&rxq->rx_free);
3225 INIT_LIST_HEAD(&rxq->rx_used);
3226
3227 /* Fill the rx_used queue with _all_ of the Rx buffers */
3228 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3229 /* In the reset function, these buffers may have been allocated
3230 * to an SKB, so we need to unmap and free potential storage */
3231 if (rxq->pool[i].skb != NULL) {
3232 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3233 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3234 dev_kfree_skb(rxq->pool[i].skb);
3235 rxq->pool[i].skb = NULL;
3236 }
3237 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3238 }
3239
3240 /* Set us so that we have processed and used all buffers, but have
3241 * not restocked the Rx queue with fresh buffers */
3242 rxq->read = rxq->write = 0;
3243 rxq->processed = RX_QUEUE_SIZE - 1;
3244 rxq->free_count = 0;
3245 spin_unlock_irqrestore(&rxq->lock, flags);
3246 }
3247
3248 #ifdef CONFIG_PM
3249 static int fw_loaded = 0;
3250 static const struct firmware *bootfw = NULL;
3251 static const struct firmware *firmware = NULL;
3252 static const struct firmware *ucode = NULL;
3253
3254 static void free_firmware(void)
3255 {
3256 if (fw_loaded) {
3257 release_firmware(bootfw);
3258 release_firmware(ucode);
3259 release_firmware(firmware);
3260 bootfw = ucode = firmware = NULL;
3261 fw_loaded = 0;
3262 }
3263 }
3264 #else
3265 #define free_firmware() do {} while (0)
3266 #endif
3267
3268 static int ipw_load(struct ipw_priv *priv)
3269 {
3270 #ifndef CONFIG_PM
3271 const struct firmware *bootfw = NULL;
3272 const struct firmware *firmware = NULL;
3273 const struct firmware *ucode = NULL;
3274 #endif
3275 int rc = 0, retries = 3;
3276
3277 #ifdef CONFIG_PM
3278 if (!fw_loaded) {
3279 #endif
3280 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
3281 if (rc)
3282 goto error;
3283
3284 switch (priv->ieee->iw_mode) {
3285 case IW_MODE_ADHOC:
3286 rc = ipw_get_fw(priv, &ucode,
3287 IPW_FW_NAME("ibss_ucode"));
3288 if (rc)
3289 goto error;
3290
3291 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
3292 break;
3293
3294 #ifdef CONFIG_IPW2200_MONITOR
3295 case IW_MODE_MONITOR:
3296 rc = ipw_get_fw(priv, &ucode,
3297 IPW_FW_NAME("sniffer_ucode"));
3298 if (rc)
3299 goto error;
3300
3301 rc = ipw_get_fw(priv, &firmware,
3302 IPW_FW_NAME("sniffer"));
3303 break;
3304 #endif
3305 case IW_MODE_INFRA:
3306 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
3307 if (rc)
3308 goto error;
3309
3310 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
3311 break;
3312
3313 default:
3314 rc = -EINVAL;
3315 }
3316
3317 if (rc)
3318 goto error;
3319
3320 #ifdef CONFIG_PM
3321 fw_loaded = 1;
3322 }
3323 #endif
3324
3325 if (!priv->rxq)
3326 priv->rxq = ipw_rx_queue_alloc(priv);
3327 else
3328 ipw_rx_queue_reset(priv, priv->rxq);
3329 if (!priv->rxq) {
3330 IPW_ERROR("Unable to initialize Rx queue\n");
3331 goto error;
3332 }
3333
3334 retry:
3335 /* Ensure interrupts are disabled */
3336 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3337 priv->status &= ~STATUS_INT_ENABLED;
3338
3339 /* ack pending interrupts */
3340 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3341
3342 ipw_stop_nic(priv);
3343
3344 rc = ipw_reset_nic(priv);
3345 if (rc) {
3346 IPW_ERROR("Unable to reset NIC\n");
3347 goto error;
3348 }
3349
3350 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3351 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3352
3353 /* DMA the initial boot firmware into the device */
3354 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
3355 bootfw->size - sizeof(struct fw_header));
3356 if (rc < 0) {
3357 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3358 goto error;
3359 }
3360
3361 /* kick start the device */
3362 ipw_start_nic(priv);
3363
3364 	/* wait for the device to finish its initial startup sequence */
3365 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3366 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3367 if (rc < 0) {
3368 IPW_ERROR("device failed to boot initial fw image\n");
3369 goto error;
3370 }
3371 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3372
3373 /* ack fw init done interrupt */
3374 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3375
3376 /* DMA the ucode into the device */
3377 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
3378 ucode->size - sizeof(struct fw_header));
3379 if (rc < 0) {
3380 IPW_ERROR("Unable to load ucode: %d\n", rc);
3381 goto error;
3382 }
3383
3384 /* stop nic */
3385 ipw_stop_nic(priv);
3386
3387 /* DMA bss firmware into the device */
3388 rc = ipw_load_firmware(priv, firmware->data +
3389 sizeof(struct fw_header),
3390 firmware->size - sizeof(struct fw_header));
3391 if (rc < 0) {
3392 IPW_ERROR("Unable to load firmware: %d\n", rc);
3393 goto error;
3394 }
3395
3396 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3397
3398 rc = ipw_queue_reset(priv);
3399 if (rc) {
3400 IPW_ERROR("Unable to initialize queues\n");
3401 goto error;
3402 }
3403
3404 /* Ensure interrupts are disabled */
3405 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3406 /* ack pending interrupts */
3407 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3408
3409 /* kick start the device */
3410 ipw_start_nic(priv);
3411
3412 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3413 if (retries > 0) {
3414 IPW_WARNING("Parity error. Retrying init.\n");
3415 retries--;
3416 goto retry;
3417 }
3418
3419 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3420 rc = -EIO;
3421 goto error;
3422 }
3423
3424 /* wait for the device */
3425 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3426 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3427 if (rc < 0) {
3428 IPW_ERROR("device failed to start after 500ms\n");
3429 goto error;
3430 }
3431 IPW_DEBUG_INFO("device response after %dms\n", rc);
3432
3433 /* ack fw init done interrupt */
3434 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3435
3436 /* read eeprom data and initialize the eeprom region of sram */
3437 priv->eeprom_delay = 1;
3438 ipw_eeprom_init_sram(priv);
3439
3440 /* enable interrupts */
3441 ipw_enable_interrupts(priv);
3442
3443 /* Ensure our queue has valid packets */
3444 ipw_rx_queue_replenish(priv);
3445
3446 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3447
3448 /* ack pending interrupts */
3449 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3450
3451 #ifndef CONFIG_PM
3452 release_firmware(bootfw);
3453 release_firmware(ucode);
3454 release_firmware(firmware);
3455 #endif
3456 return 0;
3457
3458 error:
3459 if (priv->rxq) {
3460 ipw_rx_queue_free(priv, priv->rxq);
3461 priv->rxq = NULL;
3462 }
3463 ipw_tx_queue_free(priv);
3464 if (bootfw)
3465 release_firmware(bootfw);
3466 if (ucode)
3467 release_firmware(ucode);
3468 if (firmware)
3469 release_firmware(firmware);
3470 #ifdef CONFIG_PM
3471 fw_loaded = 0;
3472 bootfw = ucode = firmware = NULL;
3473 #endif
3474
3475 return rc;
3476 }
3477
3478 /**
3479 * DMA services
3480 *
3481 * Theory of operation
3482 *
3483  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3484  * Two empty entries are always kept in the buffer to protect from overflow.
3485  *
3486  * For the Tx queues there are low mark and high mark limits.  If, after
3487  * queuing a packet for Tx, the free space drops below the low mark, the Tx
3488  * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), the Tx
3489  * queue is resumed once the free space rises above the high mark.
3490 *
3491 * The IPW operates with six queues, one receive queue in the device's
3492 * sram, one transmit queue for sending commands to the device firmware,
3493 * and four transmit queues for data.
3494 *
3495 * The four transmit queues allow for performing quality of service (qos)
3496 * transmissions as per the 802.11 protocol. Currently Linux does not
3497 * provide a mechanism to the user for utilizing prioritized queues, so
3498 * we only utilize the first data transmit queue (queue1).
3499 */
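
/*
 * Worked example (informational only): with n_bd = 16, last_used = 5 and
 * first_empty = 14, ipw_queue_space() below computes 5 - 14 = -9, wraps it
 * to -9 + 16 = 7, and subtracts the 2-entry reserve to report 5 free slots.
 * The reserve is what keeps a completely full ring distinguishable from a
 * completely empty one.
 */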
3500
3501 /**
3502 * Driver allocates buffers of this size for Rx
3503 */
3504
3505 static inline int ipw_queue_space(const struct clx2_queue *q)
3506 {
3507 int s = q->last_used - q->first_empty;
3508 if (s <= 0)
3509 s += q->n_bd;
3510 s -= 2; /* keep some reserve to not confuse empty and full situations */
3511 if (s < 0)
3512 s = 0;
3513 return s;
3514 }
3515
3516 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3517 {
3518 return (++index == n_bd) ? 0 : index;
3519 }
3520
3521 /**
3522 * Initialize common DMA queue structure
3523 *
3524 * @param q queue to init
3525 * @param count Number of BD's to allocate. Should be power of 2
3526 * @param read_register Address for 'read' register
3527 * (not offset within BAR, full address)
3528 * @param write_register Address for 'write' register
3529 * (not offset within BAR, full address)
3530 * @param base_register Address for 'base' register
3531 * (not offset within BAR, full address)
3532 * @param size Address for 'size' register
3533 * (not offset within BAR, full address)
3534 */
3535 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3536 int count, u32 read, u32 write, u32 base, u32 size)
3537 {
3538 q->n_bd = count;
3539
3540 q->low_mark = q->n_bd / 4;
3541 if (q->low_mark < 4)
3542 q->low_mark = 4;
3543
3544 q->high_mark = q->n_bd / 8;
3545 if (q->high_mark < 2)
3546 q->high_mark = 2;
3547
3548 q->first_empty = q->last_used = 0;
3549 q->reg_r = read;
3550 q->reg_w = write;
3551
3552 ipw_write32(priv, base, q->dma_addr);
3553 ipw_write32(priv, size, count);
3554 ipw_write32(priv, read, 0);
3555 ipw_write32(priv, write, 0);
3556
3557 _ipw_read32(priv, 0x90);
3558 }
3559
3560 static int ipw_queue_tx_init(struct ipw_priv *priv,
3561 struct clx2_tx_queue *q,
3562 int count, u32 read, u32 write, u32 base, u32 size)
3563 {
3564 struct pci_dev *dev = priv->pci_dev;
3565
3566 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3567 if (!q->txb) {
3568 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3569 return -ENOMEM;
3570 }
3571
3572 q->bd =
3573 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3574 if (!q->bd) {
3575 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3576 sizeof(q->bd[0]) * count);
3577 kfree(q->txb);
3578 q->txb = NULL;
3579 return -ENOMEM;
3580 }
3581
3582 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3583 return 0;
3584 }
3585
3586 /**
3587 * Free one TFD, those at index [txq->q.last_used].
3588 * Do NOT advance any indexes
3589 *
3590 * @param dev
3591 * @param txq
3592 */
3593 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3594 struct clx2_tx_queue *txq)
3595 {
3596 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3597 struct pci_dev *dev = priv->pci_dev;
3598 int i;
3599
3600 /* classify bd */
3601 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3602 /* nothing to cleanup after for host commands */
3603 return;
3604
3605 /* sanity check */
3606 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3607 IPW_ERROR("Too many chunks: %i\n",
3608 le32_to_cpu(bd->u.data.num_chunks));
3609 		/** @todo issue a fatal error, this is quite a serious situation */
3610 return;
3611 }
3612
3613 /* unmap chunks if any */
3614 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3615 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3616 le16_to_cpu(bd->u.data.chunk_len[i]),
3617 PCI_DMA_TODEVICE);
3618 if (txq->txb[txq->q.last_used]) {
3619 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3620 txq->txb[txq->q.last_used] = NULL;
3621 }
3622 }
3623 }
3624
3625 /**
3626 * Deallocate DMA queue.
3627 *
3628 * Empty queue by removing and destroying all BD's.
3629 * Free all buffers.
3630 *
3631 * @param dev
3632 * @param q
3633 */
3634 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3635 {
3636 struct clx2_queue *q = &txq->q;
3637 struct pci_dev *dev = priv->pci_dev;
3638
3639 if (q->n_bd == 0)
3640 return;
3641
3642 /* first, empty all BD's */
3643 for (; q->first_empty != q->last_used;
3644 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3645 ipw_queue_tx_free_tfd(priv, txq);
3646 }
3647
3648 /* free buffers belonging to queue itself */
3649 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3650 q->dma_addr);
3651 kfree(txq->txb);
3652
3653 /* 0 fill whole structure */
3654 memset(txq, 0, sizeof(*txq));
3655 }
3656
3657 /**
3658 * Destroy all DMA queues and structures
3659 *
3660 * @param priv
3661 */
3662 static void ipw_tx_queue_free(struct ipw_priv *priv)
3663 {
3664 /* Tx CMD queue */
3665 ipw_queue_tx_free(priv, &priv->txq_cmd);
3666
3667 /* Tx queues */
3668 ipw_queue_tx_free(priv, &priv->txq[0]);
3669 ipw_queue_tx_free(priv, &priv->txq[1]);
3670 ipw_queue_tx_free(priv, &priv->txq[2]);
3671 ipw_queue_tx_free(priv, &priv->txq[3]);
3672 }
3673
3674 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3675 {
3676 /* First 3 bytes are manufacturer */
3677 bssid[0] = priv->mac_addr[0];
3678 bssid[1] = priv->mac_addr[1];
3679 bssid[2] = priv->mac_addr[2];
3680
3681 /* Last bytes are random */
3682 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3683
3684 bssid[0] &= 0xfe; /* clear multicast bit */
3685 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3686 }
3687
3688 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3689 {
3690 struct ipw_station_entry entry;
3691 int i;
3692
3693 for (i = 0; i < priv->num_stations; i++) {
3694 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3695 /* Another node is active in network */
3696 priv->missed_adhoc_beacons = 0;
3697 if (!(priv->config & CFG_STATIC_CHANNEL))
3698 /* when other nodes drop out, we drop out */
3699 priv->config &= ~CFG_ADHOC_PERSIST;
3700
3701 return i;
3702 }
3703 }
3704
3705 if (i == MAX_STATIONS)
3706 return IPW_INVALID_STATION;
3707
3708 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3709
3710 entry.reserved = 0;
3711 entry.support_mode = 0;
3712 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3713 memcpy(priv->stations[i], bssid, ETH_ALEN);
3714 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3715 &entry, sizeof(entry));
3716 priv->num_stations++;
3717
3718 return i;
3719 }
3720
3721 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3722 {
3723 int i;
3724
3725 for (i = 0; i < priv->num_stations; i++)
3726 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3727 return i;
3728
3729 return IPW_INVALID_STATION;
3730 }
3731
3732 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3733 {
3734 int err;
3735
3736 if (priv->status & STATUS_ASSOCIATING) {
3737 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3738 queue_work(priv->workqueue, &priv->disassociate);
3739 return;
3740 }
3741
3742 if (!(priv->status & STATUS_ASSOCIATED)) {
3743 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3744 return;
3745 }
3746
3747 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
3748 "on channel %d.\n",
3749 MAC_ARG(priv->assoc_request.bssid),
3750 priv->assoc_request.channel);
3751
3752 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3753 priv->status |= STATUS_DISASSOCIATING;
3754
3755 if (quiet)
3756 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3757 else
3758 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3759
3760 err = ipw_send_associate(priv, &priv->assoc_request);
3761 if (err) {
3762 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3763 "failed.\n");
3764 return;
3765 }
3766
3767 }
3768
3769 static int ipw_disassociate(void *data)
3770 {
3771 struct ipw_priv *priv = data;
3772 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3773 return 0;
3774 ipw_send_disassociate(data, 0);
3775 return 1;
3776 }
3777
3778 static void ipw_bg_disassociate(void *data)
3779 {
3780 struct ipw_priv *priv = data;
3781 down(&priv->sem);
3782 ipw_disassociate(data);
3783 up(&priv->sem);
3784 }
3785
3786 static void ipw_system_config(void *data)
3787 {
3788 struct ipw_priv *priv = data;
3789 ipw_send_system_config(priv, &priv->sys_config);
3790 }
3791
3792 struct ipw_status_code {
3793 u16 status;
3794 const char *reason;
3795 };
3796
3797 static const struct ipw_status_code ipw_status_codes[] = {
3798 {0x00, "Successful"},
3799 {0x01, "Unspecified failure"},
3800 {0x0A, "Cannot support all requested capabilities in the "
3801 "Capability information field"},
3802 {0x0B, "Reassociation denied due to inability to confirm that "
3803 "association exists"},
3804 {0x0C, "Association denied due to reason outside the scope of this "
3805 "standard"},
3806 {0x0D,
3807 "Responding station does not support the specified authentication "
3808 "algorithm"},
3809 {0x0E,
3810 "Received an Authentication frame with authentication sequence "
3811 "transaction sequence number out of expected sequence"},
3812 {0x0F, "Authentication rejected because of challenge failure"},
3813 {0x10, "Authentication rejected due to timeout waiting for next "
3814 "frame in sequence"},
3815 {0x11, "Association denied because AP is unable to handle additional "
3816 "associated stations"},
3817 {0x12,
3818 "Association denied due to requesting station not supporting all "
3819 "of the datarates in the BSSBasicServiceSet Parameter"},
3820 {0x13,
3821 "Association denied due to requesting station not supporting "
3822 "short preamble operation"},
3823 {0x14,
3824 "Association denied due to requesting station not supporting "
3825 "PBCC encoding"},
3826 {0x15,
3827 "Association denied due to requesting station not supporting "
3828 "channel agility"},
3829 {0x19,
3830 "Association denied due to requesting station not supporting "
3831 "short slot operation"},
3832 {0x1A,
3833 "Association denied due to requesting station not supporting "
3834 "DSSS-OFDM operation"},
3835 {0x28, "Invalid Information Element"},
3836 {0x29, "Group Cipher is not valid"},
3837 {0x2A, "Pairwise Cipher is not valid"},
3838 {0x2B, "AKMP is not valid"},
3839 {0x2C, "Unsupported RSN IE version"},
3840 {0x2D, "Invalid RSN IE Capabilities"},
3841 {0x2E, "Cipher suite is rejected per security policy"},
3842 };
3843
3844 #ifdef CONFIG_IPW2200_DEBUG
3845 static const char *ipw_get_status_code(u16 status)
3846 {
3847 int i;
3848 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3849 if (ipw_status_codes[i].status == (status & 0xff))
3850 return ipw_status_codes[i].reason;
3851 return "Unknown status value.";
3852 }
3853 #endif
3854
3855 static inline void average_init(struct average *avg)
3856 {
3857 memset(avg, 0, sizeof(*avg));
3858 }
3859
3860 static void average_add(struct average *avg, s16 val)
3861 {
3862 avg->sum -= avg->entries[avg->pos];
3863 avg->sum += val;
3864 avg->entries[avg->pos++] = val;
3865 if (unlikely(avg->pos == AVG_ENTRIES)) {
3866 avg->init = 1;
3867 avg->pos = 0;
3868 }
3869 }
3870
3871 static s16 average_value(struct average *avg)
3872 {
3873 if (!unlikely(avg->init)) {
3874 if (avg->pos)
3875 return avg->sum / avg->pos;
3876 return 0;
3877 }
3878
3879 return avg->sum / AVG_ENTRIES;
3880 }
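/*
 * Usage sketch for the sliding-window helpers above: the driver feeds in one
 * sample per measurement and reads the mean back when reporting, e.g.
 *
 *	average_add(&priv->average_noise, priv->last_noise);
 *	rssi = average_value(&priv->average_rssi);
 *
 * Until AVG_ENTRIES samples have been collected the mean is taken over the
 * samples seen so far; after that it is a rolling mean over the most recent
 * AVG_ENTRIES samples.
 */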
3881
3882 static void ipw_reset_stats(struct ipw_priv *priv)
3883 {
3884 u32 len = sizeof(u32);
3885
3886 priv->quality = 0;
3887
3888 average_init(&priv->average_missed_beacons);
3889 average_init(&priv->average_rssi);
3890 average_init(&priv->average_noise);
3891
3892 priv->last_rate = 0;
3893 priv->last_missed_beacons = 0;
3894 priv->last_rx_packets = 0;
3895 priv->last_tx_packets = 0;
3896 priv->last_tx_failures = 0;
3897
3898 /* Firmware managed, reset only when NIC is restarted, so we have to
3899 * normalize on the current value */
3900 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3901 &priv->last_rx_err, &len);
3902 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3903 &priv->last_tx_failures, &len);
3904
3905 /* Driver managed, reset with each association */
3906 priv->missed_adhoc_beacons = 0;
3907 priv->missed_beacons = 0;
3908 priv->tx_packets = 0;
3909 priv->rx_packets = 0;
3910
3911 }
3912
3913 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3914 {
3915 u32 i = 0x80000000;
3916 u32 mask = priv->rates_mask;
3917 /* If currently associated in B mode, restrict the maximum
3918 * rate match to B rates */
3919 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3920 mask &= IEEE80211_CCK_RATES_MASK;
3921
3922 /* TODO: Verify that the rate is supported by the current rates
3923 * list. */
3924
3925 while (i && !(mask & i))
3926 i >>= 1;
3927 switch (i) {
3928 case IEEE80211_CCK_RATE_1MB_MASK:
3929 return 1000000;
3930 case IEEE80211_CCK_RATE_2MB_MASK:
3931 return 2000000;
3932 case IEEE80211_CCK_RATE_5MB_MASK:
3933 return 5500000;
3934 case IEEE80211_OFDM_RATE_6MB_MASK:
3935 return 6000000;
3936 case IEEE80211_OFDM_RATE_9MB_MASK:
3937 return 9000000;
3938 case IEEE80211_CCK_RATE_11MB_MASK:
3939 return 11000000;
3940 case IEEE80211_OFDM_RATE_12MB_MASK:
3941 return 12000000;
3942 case IEEE80211_OFDM_RATE_18MB_MASK:
3943 return 18000000;
3944 case IEEE80211_OFDM_RATE_24MB_MASK:
3945 return 24000000;
3946 case IEEE80211_OFDM_RATE_36MB_MASK:
3947 return 36000000;
3948 case IEEE80211_OFDM_RATE_48MB_MASK:
3949 return 48000000;
3950 case IEEE80211_OFDM_RATE_54MB_MASK:
3951 return 54000000;
3952 }
3953
3954 if (priv->ieee->mode == IEEE_B)
3955 return 11000000;
3956 else
3957 return 54000000;
3958 }
3959
3960 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3961 {
3962 u32 rate, len = sizeof(rate);
3963 int err;
3964
3965 if (!(priv->status & STATUS_ASSOCIATED))
3966 return 0;
3967
3968 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3969 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3970 &len);
3971 if (err) {
3972 IPW_DEBUG_INFO("failed querying ordinals.\n");
3973 return 0;
3974 }
3975 } else
3976 return ipw_get_max_rate(priv);
3977
3978 switch (rate) {
3979 case IPW_TX_RATE_1MB:
3980 return 1000000;
3981 case IPW_TX_RATE_2MB:
3982 return 2000000;
3983 case IPW_TX_RATE_5MB:
3984 return 5500000;
3985 case IPW_TX_RATE_6MB:
3986 return 6000000;
3987 case IPW_TX_RATE_9MB:
3988 return 9000000;
3989 case IPW_TX_RATE_11MB:
3990 return 11000000;
3991 case IPW_TX_RATE_12MB:
3992 return 12000000;
3993 case IPW_TX_RATE_18MB:
3994 return 18000000;
3995 case IPW_TX_RATE_24MB:
3996 return 24000000;
3997 case IPW_TX_RATE_36MB:
3998 return 36000000;
3999 case IPW_TX_RATE_48MB:
4000 return 48000000;
4001 case IPW_TX_RATE_54MB:
4002 return 54000000;
4003 }
4004
4005 return 0;
4006 }
4007
4008 #define IPW_STATS_INTERVAL (2 * HZ)
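/* ipw_gather_stats() reschedules itself with this period (two seconds), so
 * the deltas it computes below are per two-second window. */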
4009 static void ipw_gather_stats(struct ipw_priv *priv)
4010 {
4011 u32 rx_err, rx_err_delta, rx_packets_delta;
4012 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4013 u32 missed_beacons_percent, missed_beacons_delta;
4014 u32 quality = 0;
4015 u32 len = sizeof(u32);
4016 s16 rssi;
4017 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4018 rate_quality;
4019 u32 max_rate;
4020
4021 if (!(priv->status & STATUS_ASSOCIATED)) {
4022 priv->quality = 0;
4023 return;
4024 }
4025
4026 /* Update the statistics */
4027 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4028 &priv->missed_beacons, &len);
4029 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4030 priv->last_missed_beacons = priv->missed_beacons;
4031 if (priv->assoc_request.beacon_interval) {
4032 missed_beacons_percent = missed_beacons_delta *
4033 (HZ * priv->assoc_request.beacon_interval) /
4034 (IPW_STATS_INTERVAL * 10);
4035 } else {
4036 missed_beacons_percent = 0;
4037 }
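/* The expression above is "missed / expected * 100" rearranged to stay in
 * integer math: over IPW_STATS_INTERVAL jiffies we expect roughly
 * IPW_STATS_INTERVAL * 1000 / (HZ * beacon_interval) beacons (treating a
 * beacon-interval TU as about 1 ms), so
 * percent = delta * HZ * beacon_interval / (IPW_STATS_INTERVAL * 10). */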
4038 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4039
4040 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4041 rx_err_delta = rx_err - priv->last_rx_err;
4042 priv->last_rx_err = rx_err;
4043
4044 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4045 tx_failures_delta = tx_failures - priv->last_tx_failures;
4046 priv->last_tx_failures = tx_failures;
4047
4048 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4049 priv->last_rx_packets = priv->rx_packets;
4050
4051 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4052 priv->last_tx_packets = priv->tx_packets;
4053
4054 /* Calculate quality based on the following:
4055 *
4056 * Missed beacon: 100% = 0, 0% = 70% missed
4057 * Rate: 60% = 1Mbs, 100% = Max
4058 * Rx and Tx errors represent a straight % of total Rx/Tx
4059 * RSSI: 100% = > -50, 0% = < -80
4060 * Rx errors: 100% = 0, 0% = 50% missed
4061 *
4062 * The lowest computed quality is used.
4063 *
4064 */
4065 #define BEACON_THRESHOLD 5
4066 beacon_quality = 100 - missed_beacons_percent;
4067 if (beacon_quality < BEACON_THRESHOLD)
4068 beacon_quality = 0;
4069 else
4070 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4071 (100 - BEACON_THRESHOLD);
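/* Worked example (hypothetical numbers): with 10% of beacons missed,
 * beacon_quality starts at 90 and is rescaled to
 * (90 - 5) * 100 / (100 - 5) ~= 89%; once fewer than BEACON_THRESHOLD
 * percent of beacons are getting through, it is clamped to 0. */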
4072 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4073 beacon_quality, missed_beacons_percent);
4074
4075 priv->last_rate = ipw_get_current_rate(priv);
4076 max_rate = ipw_get_max_rate(priv);
4077 rate_quality = priv->last_rate * 40 / max_rate + 60;
4078 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4079 rate_quality, priv->last_rate / 1000000);
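/* Worked example (hypothetical numbers): at 11 Mbps with a 54 Mbps maximum,
 * rate_quality = 11 * 40 / 54 + 60 ~= 68%; running at the maximum rate
 * always maps to 100%. */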
4080
4081 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4082 rx_quality = 100 - (rx_err_delta * 100) /
4083 (rx_packets_delta + rx_err_delta);
4084 else
4085 rx_quality = 100;
4086 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4087 rx_quality, rx_err_delta, rx_packets_delta);
4088
4089 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4090 tx_quality = 100 - (tx_failures_delta * 100) /
4091 (tx_packets_delta + tx_failures_delta);
4092 else
4093 tx_quality = 100;
4094 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4095 tx_quality, tx_failures_delta, tx_packets_delta);
4096
4097 rssi = average_value(&priv->average_rssi);
4098 signal_quality =
4099 (100 *
4100 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4101 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4102 (priv->ieee->perfect_rssi - rssi) *
4103 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4104 62 * (priv->ieee->perfect_rssi - rssi))) /
4105 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4106 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4107 if (signal_quality > 100)
4108 signal_quality = 100;
4109 else if (signal_quality < 1)
4110 signal_quality = 0;
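/* The expression above is the integer form of
 *	100 - (p - r) * (15 * (p - w) + 62 * (p - r)) / (p - w)^2
 * with p = perfect_rssi, w = worst_rssi and r = the averaged RSSI: it is
 * 100 when r == p and falls off quadratically as r drops toward (and past)
 * w, hence the clamp to the 0..100 range just above. */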
4111
4112 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4113 signal_quality, rssi);
4114
4115 quality = min(beacon_quality,
4116 min(rate_quality,
4117 min(tx_quality, min(rx_quality, signal_quality))));
4118 if (quality == beacon_quality)
4119 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4120 quality);
4121 if (quality == rate_quality)
4122 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4123 quality);
4124 if (quality == tx_quality)
4125 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4126 quality);
4127 if (quality == rx_quality)
4128 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4129 quality);
4130 if (quality == signal_quality)
4131 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4132 quality);
4133
4134 priv->quality = quality;
4135
4136 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4137 IPW_STATS_INTERVAL);
4138 }
4139
4140 static void ipw_bg_gather_stats(void *data)
4141 {
4142 struct ipw_priv *priv = data;
4143 down(&priv->sem);
4144 ipw_gather_stats(data);
4145 up(&priv->sem);
4146 }
4147
4148 /* Missed beacon behavior:
4149 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4150 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4151 * Above disassociate threshold, give up and stop scanning.
4152 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
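/* Illustrative walk-through (threshold values below are hypothetical): with
 * roaming_threshold = 8 and disassociate_threshold = 24, missing 1-8 beacons
 * only logs the event, missing 9-24 sets STATUS_ROAMING and kicks off a scan
 * for a better signal, and missing more than 24 aborts any running scan and
 * queues the disassociate work. */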
4153 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4154 int missed_count)
4155 {
4156 priv->notif_missed_beacons = missed_count;
4157
4158 if (missed_count > priv->disassociate_threshold &&
4159 priv->status & STATUS_ASSOCIATED) {
4160 /* If associated and we've hit the missed
4161 * beacon threshold, disassociate, turn
4162 * off roaming, and abort any active scans */
4163 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4164 IPW_DL_STATE | IPW_DL_ASSOC,
4165 "Missed beacon: %d - disassociate\n", missed_count);
4166 priv->status &= ~STATUS_ROAMING;
4167 if (priv->status & STATUS_SCANNING) {
4168 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4169 IPW_DL_STATE,
4170 "Aborting scan with missed beacon.\n");
4171 queue_work(priv->workqueue, &priv->abort_scan);
4172 }
4173
4174 queue_work(priv->workqueue, &priv->disassociate);
4175 return;
4176 }
4177
4178 if (priv->status & STATUS_ROAMING) {
4179 /* If we are currently roaming, then just
4180 * print a debug statement... */
4181 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4182 "Missed beacon: %d - roam in progress\n",
4183 missed_count);
4184 return;
4185 }
4186
4187 if (missed_count > priv->roaming_threshold &&
4188 missed_count <= priv->disassociate_threshold) {
4189 /* If we are not already roaming, set the ROAM
4190 * bit in the status and kick off a scan.
4191 * This can happen several times before we reach
4192 * disassociate_threshold. */
4193 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4194 "Missed beacon: %d - initiate "
4195 "roaming\n", missed_count);
4196 if (!(priv->status & STATUS_ROAMING)) {
4197 priv->status |= STATUS_ROAMING;
4198 if (!(priv->status & STATUS_SCANNING))
4199 queue_work(priv->workqueue,
4200 &priv->request_scan);
4201 }
4202 return;
4203 }
4204
4205 if (priv->status & STATUS_SCANNING) {
4206 /* Stop scan to keep fw from getting
4207 * stuck (only if we aren't roaming --
4208 * otherwise we'll never scan more than 2 or 3
4209 * channels..) */
4210 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4211 "Aborting scan with missed beacon.\n");
4212 queue_work(priv->workqueue, &priv->abort_scan);
4213 }
4214
4215 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4216
4217 }
4218
4219 /**
4220 * Handle host notification packet.
4221 * Called from interrupt routine
4222 */
4223 static void ipw_rx_notification(struct ipw_priv *priv,
4224 struct ipw_rx_notification *notif)
4225 {
4226 notif->size = le16_to_cpu(notif->size);
4227
4228 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4229
4230 switch (notif->subtype) {
4231 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4232 struct notif_association *assoc = &notif->u.assoc;
4233
4234 switch (assoc->state) {
4235 case CMAS_ASSOCIATED:{
4236 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4237 IPW_DL_ASSOC,
4238 "associated: '%s' " MAC_FMT
4239 " \n",
4240 escape_essid(priv->essid,
4241 priv->essid_len),
4242 MAC_ARG(priv->bssid));
4243
4244 switch (priv->ieee->iw_mode) {
4245 case IW_MODE_INFRA:
4246 memcpy(priv->ieee->bssid,
4247 priv->bssid, ETH_ALEN);
4248 break;
4249
4250 case IW_MODE_ADHOC:
4251 memcpy(priv->ieee->bssid,
4252 priv->bssid, ETH_ALEN);
4253
4254 /* clear out the station table */
4255 priv->num_stations = 0;
4256
4257 IPW_DEBUG_ASSOC
4258 ("queueing adhoc check\n");
4259 queue_delayed_work(priv->
4260 workqueue,
4261 &priv->
4262 adhoc_check,
4263 priv->
4264 assoc_request.
4265 beacon_interval);
4266 break;
4267 }
4268
4269 priv->status &= ~STATUS_ASSOCIATING;
4270 priv->status |= STATUS_ASSOCIATED;
4271 queue_work(priv->workqueue,
4272 &priv->system_config);
4273
4274 #ifdef CONFIG_IPW_QOS
4275 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4276 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4277 if ((priv->status & STATUS_AUTH) &&
4278 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4279 == IEEE80211_STYPE_ASSOC_RESP)) {
4280 if ((sizeof
4281 (struct
4282 ieee80211_assoc_response)
4283 <= notif->size)
4284 && (notif->size <= 2314)) {
4285 struct
4286 ieee80211_rx_stats
4287 stats = {
4288 .len =
4289 notif->
4290 size - 1,
4291 };
4292
4293 IPW_DEBUG_QOS
4294 ("QoS Associate "
4295 "size %d\n",
4296 notif->size);
4297 ieee80211_rx_mgt(priv->
4298 ieee,
4299 (struct
4300 ieee80211_hdr_4addr
4301 *)
4302 &notif->u.raw, &stats);
4303 }
4304 }
4305 #endif
4306
4307 schedule_work(&priv->link_up);
4308
4309 break;
4310 }
4311
4312 case CMAS_AUTHENTICATED:{
4313 if (priv->
4314 status & (STATUS_ASSOCIATED |
4315 STATUS_AUTH)) {
4316 #ifdef CONFIG_IPW2200_DEBUG
4317 struct notif_authenticate *auth
4318 = &notif->u.auth;
4319 IPW_DEBUG(IPW_DL_NOTIF |
4320 IPW_DL_STATE |
4321 IPW_DL_ASSOC,
4322 "deauthenticated: '%s' "
4323 MAC_FMT
4324 ": (0x%04X) - %s \n",
4325 escape_essid(priv->
4326 essid,
4327 priv->
4328 essid_len),
4329 MAC_ARG(priv->bssid),
4330 ntohs(auth->status),
4331 ipw_get_status_code
4332 (ntohs
4333 (auth->status)));
4334 #endif
4335
4336 priv->status &=
4337 ~(STATUS_ASSOCIATING |
4338 STATUS_AUTH |
4339 STATUS_ASSOCIATED);
4340
4341 schedule_work(&priv->link_down);
4342 break;
4343 }
4344
4345 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4346 IPW_DL_ASSOC,
4347 "authenticated: '%s' " MAC_FMT
4348 "\n",
4349 escape_essid(priv->essid,
4350 priv->essid_len),
4351 MAC_ARG(priv->bssid));
4352 break;
4353 }
4354
4355 case CMAS_INIT:{
4356 if (priv->status & STATUS_AUTH) {
4357 struct
4358 ieee80211_assoc_response
4359 *resp;
4360 resp =
4361 (struct
4362 ieee80211_assoc_response
4363 *)&notif->u.raw;
4364 IPW_DEBUG(IPW_DL_NOTIF |
4365 IPW_DL_STATE |
4366 IPW_DL_ASSOC,
4367 "association failed (0x%04X): %s\n",
4368 ntohs(resp->status),
4369 ipw_get_status_code
4370 (ntohs
4371 (resp->status)));
4372 }
4373
4374 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4375 IPW_DL_ASSOC,
4376 "disassociated: '%s' " MAC_FMT
4377 " \n",
4378 escape_essid(priv->essid,
4379 priv->essid_len),
4380 MAC_ARG(priv->bssid));
4381
4382 priv->status &=
4383 ~(STATUS_DISASSOCIATING |
4384 STATUS_ASSOCIATING |
4385 STATUS_ASSOCIATED | STATUS_AUTH);
4386 if (priv->assoc_network
4387 && (priv->assoc_network->
4388 capability &
4389 WLAN_CAPABILITY_IBSS))
4390 ipw_remove_current_network
4391 (priv);
4392
4393 schedule_work(&priv->link_down);
4394
4395 break;
4396 }
4397
4398 case CMAS_RX_ASSOC_RESP:
4399 break;
4400
4401 default:
4402 IPW_ERROR("assoc: unknown (%d)\n",
4403 assoc->state);
4404 break;
4405 }
4406
4407 break;
4408 }
4409
4410 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4411 struct notif_authenticate *auth = &notif->u.auth;
4412 switch (auth->state) {
4413 case CMAS_AUTHENTICATED:
4414 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4415 "authenticated: '%s' " MAC_FMT " \n",
4416 escape_essid(priv->essid,
4417 priv->essid_len),
4418 MAC_ARG(priv->bssid));
4419 priv->status |= STATUS_AUTH;
4420 break;
4421
4422 case CMAS_INIT:
4423 if (priv->status & STATUS_AUTH) {
4424 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4425 IPW_DL_ASSOC,
4426 "authentication failed (0x%04X): %s\n",
4427 ntohs(auth->status),
4428 ipw_get_status_code(ntohs
4429 (auth->
4430 status)));
4431 }
4432 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4433 IPW_DL_ASSOC,
4434 "deauthenticated: '%s' " MAC_FMT "\n",
4435 escape_essid(priv->essid,
4436 priv->essid_len),
4437 MAC_ARG(priv->bssid));
4438
4439 priv->status &= ~(STATUS_ASSOCIATING |
4440 STATUS_AUTH |
4441 STATUS_ASSOCIATED);
4442
4443 schedule_work(&priv->link_down);
4444 break;
4445
4446 case CMAS_TX_AUTH_SEQ_1:
4447 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4448 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4449 break;
4450 case CMAS_RX_AUTH_SEQ_2:
4451 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4452 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4453 break;
4454 case CMAS_AUTH_SEQ_1_PASS:
4455 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4456 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4457 break;
4458 case CMAS_AUTH_SEQ_1_FAIL:
4459 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4460 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4461 break;
4462 case CMAS_TX_AUTH_SEQ_3:
4463 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4464 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4465 break;
4466 case CMAS_RX_AUTH_SEQ_4:
4467 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4468 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4469 break;
4470 case CMAS_AUTH_SEQ_2_PASS:
4471 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4472 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4473 break;
4474 case CMAS_AUTH_SEQ_2_FAIL:
4475 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4476 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4477 break;
4478 case CMAS_TX_ASSOC:
4479 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4480 IPW_DL_ASSOC, "TX_ASSOC\n");
4481 break;
4482 case CMAS_RX_ASSOC_RESP:
4483 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4484 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4485
4486 break;
4487 case CMAS_ASSOCIATED:
4488 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4489 IPW_DL_ASSOC, "ASSOCIATED\n");
4490 break;
4491 default:
4492 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4493 auth->state);
4494 break;
4495 }
4496 break;
4497 }
4498
4499 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4500 struct notif_channel_result *x =
4501 &notif->u.channel_result;
4502
4503 if (notif->size == sizeof(*x)) {
4504 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4505 x->channel_num);
4506 } else {
4507 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4508 "(should be %zd)\n",
4509 notif->size, sizeof(*x));
4510 }
4511 break;
4512 }
4513
4514 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4515 struct notif_scan_complete *x = &notif->u.scan_complete;
4516 if (notif->size == sizeof(*x)) {
4517 IPW_DEBUG_SCAN
4518 ("Scan completed: type %d, %d channels, "
4519 "%d status\n", x->scan_type,
4520 x->num_channels, x->status);
4521 } else {
4522 IPW_ERROR("Scan completed of wrong size %d "
4523 "(should be %zd)\n",
4524 notif->size, sizeof(*x));
4525 }
4526
4527 priv->status &=
4528 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4529
4530 wake_up_interruptible(&priv->wait_state);
4531 cancel_delayed_work(&priv->scan_check);
4532
4533 if (priv->status & STATUS_EXIT_PENDING)
4534 break;
4535
4536 priv->ieee->scans++;
4537
4538 #ifdef CONFIG_IPW2200_MONITOR
4539 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4540 priv->status |= STATUS_SCAN_FORCED;
4541 queue_work(priv->workqueue,
4542 &priv->request_scan);
4543 break;
4544 }
4545 priv->status &= ~STATUS_SCAN_FORCED;
4546 #endif /* CONFIG_IPW2200_MONITOR */
4547
4548 if (!(priv->status & (STATUS_ASSOCIATED |
4549 STATUS_ASSOCIATING |
4550 STATUS_ROAMING |
4551 STATUS_DISASSOCIATING)))
4552 queue_work(priv->workqueue, &priv->associate);
4553 else if (priv->status & STATUS_ROAMING) {
4554 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4555 /* If a scan completed and we are in roam mode, then
4556 * the scan that completed was the one requested as a
4557 * result of entering roam... so, schedule the
4558 * roam work */
4559 queue_work(priv->workqueue,
4560 &priv->roam);
4561 else
4562 /* Don't schedule if we aborted the scan */
4563 priv->status &= ~STATUS_ROAMING;
4564 } else if (priv->status & STATUS_SCAN_PENDING)
4565 queue_work(priv->workqueue,
4566 &priv->request_scan);
4567 else if (priv->config & CFG_BACKGROUND_SCAN
4568 && priv->status & STATUS_ASSOCIATED)
4569 queue_delayed_work(priv->workqueue,
4570 &priv->request_scan, HZ);
4571 break;
4572 }
4573
4574 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4575 struct notif_frag_length *x = &notif->u.frag_len;
4576
4577 if (notif->size == sizeof(*x))
4578 IPW_ERROR("Frag length: %d\n",
4579 le16_to_cpu(x->frag_length));
4580 else
4581 IPW_ERROR("Frag length of wrong size %d "
4582 "(should be %zd)\n",
4583 notif->size, sizeof(*x));
4584 break;
4585 }
4586
4587 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4588 struct notif_link_deterioration *x =
4589 &notif->u.link_deterioration;
4590
4591 if (notif->size == sizeof(*x)) {
4592 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4593 "link deterioration: '%s' " MAC_FMT
4594 " \n", escape_essid(priv->essid,
4595 priv->essid_len),
4596 MAC_ARG(priv->bssid));
4597 memcpy(&priv->last_link_deterioration, x,
4598 sizeof(*x));
4599 } else {
4600 IPW_ERROR("Link Deterioration of wrong size %d "
4601 "(should be %zd)\n",
4602 notif->size, sizeof(*x));
4603 }
4604 break;
4605 }
4606
4607 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4608 IPW_ERROR("Dino config\n");
4609 if (priv->hcmd
4610 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4611 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4612
4613 break;
4614 }
4615
4616 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4617 struct notif_beacon_state *x = &notif->u.beacon_state;
4618 if (notif->size != sizeof(*x)) {
4619 IPW_ERROR
4620 ("Beacon state of wrong size %d (should "
4621 "be %zd)\n", notif->size, sizeof(*x));
4622 break;
4623 }
4624
4625 if (le32_to_cpu(x->state) ==
4626 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4627 ipw_handle_missed_beacon(priv,
4628 le32_to_cpu(x->
4629 number));
4630
4631 break;
4632 }
4633
4634 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4635 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4636 if (notif->size == sizeof(*x)) {
4637 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4638 "0x%02x station %d\n",
4639 x->key_state, x->security_type,
4640 x->station_index);
4641 break;
4642 }
4643
4644 IPW_ERROR
4645 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4646 notif->size, sizeof(*x));
4647 break;
4648 }
4649
4650 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4651 struct notif_calibration *x = &notif->u.calibration;
4652
4653 if (notif->size == sizeof(*x)) {
4654 memcpy(&priv->calib, x, sizeof(*x));
4655 IPW_DEBUG_INFO("TODO: Calibration\n");
4656 break;
4657 }
4658
4659 IPW_ERROR
4660 ("Calibration of wrong size %d (should be %zd)\n",
4661 notif->size, sizeof(*x));
4662 break;
4663 }
4664
4665 case HOST_NOTIFICATION_NOISE_STATS:{
4666 if (notif->size == sizeof(u32)) {
4667 priv->last_noise =
4668 (u8) (le32_to_cpu(notif->u.noise.value) &
4669 0xff);
4670 average_add(&priv->average_noise,
4671 priv->last_noise);
4672 break;
4673 }
4674
4675 IPW_ERROR
4676 ("Noise stat is wrong size %d (should be %zd)\n",
4677 notif->size, sizeof(u32));
4678 break;
4679 }
4680
4681 default:
4682 IPW_ERROR("Unknown notification: "
4683 "subtype=%d,flags=0x%2x,size=%d\n",
4684 notif->subtype, notif->flags, notif->size);
4685 }
4686 }
4687
4688 /**
4689 * Destroys all DMA structures and initialises them again
4690 *
4691 * @param priv
4692 * @return error code
4693 */
4694 static int ipw_queue_reset(struct ipw_priv *priv)
4695 {
4696 int rc = 0;
4697 /** @todo customize queue sizes */
4698 int nTx = 64, nTxCmd = 8;
4699 ipw_tx_queue_free(priv);
4700 /* Tx CMD queue */
4701 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4702 IPW_TX_CMD_QUEUE_READ_INDEX,
4703 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4704 IPW_TX_CMD_QUEUE_BD_BASE,
4705 IPW_TX_CMD_QUEUE_BD_SIZE);
4706 if (rc) {
4707 IPW_ERROR("Tx Cmd queue init failed\n");
4708 goto error;
4709 }
4710 /* Tx queue(s) */
4711 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4712 IPW_TX_QUEUE_0_READ_INDEX,
4713 IPW_TX_QUEUE_0_WRITE_INDEX,
4714 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4715 if (rc) {
4716 IPW_ERROR("Tx 0 queue init failed\n");
4717 goto error;
4718 }
4719 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4720 IPW_TX_QUEUE_1_READ_INDEX,
4721 IPW_TX_QUEUE_1_WRITE_INDEX,
4722 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4723 if (rc) {
4724 IPW_ERROR("Tx 1 queue init failed\n");
4725 goto error;
4726 }
4727 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4728 IPW_TX_QUEUE_2_READ_INDEX,
4729 IPW_TX_QUEUE_2_WRITE_INDEX,
4730 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4731 if (rc) {
4732 IPW_ERROR("Tx 2 queue init failed\n");
4733 goto error;
4734 }
4735 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4736 IPW_TX_QUEUE_3_READ_INDEX,
4737 IPW_TX_QUEUE_3_WRITE_INDEX,
4738 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4739 if (rc) {
4740 IPW_ERROR("Tx 3 queue init failed\n");
4741 goto error;
4742 }
4743 /* statistics */
4744 priv->rx_bufs_min = 0;
4745 priv->rx_pend_max = 0;
4746 return rc;
4747
4748 error:
4749 ipw_tx_queue_free(priv);
4750 return rc;
4751 }
4752
4753 /**
4754 * Reclaim Tx queue entries no more used by NIC.
4755 *
4756 * When the FW advances the 'R' index, all entries between the old and
4757 * new 'R' index need to be reclaimed. As a result, some free space is
4758 * created. If there is enough free space (> low mark), wake the Tx queue.
4759 *
4760 * @note Need to protect against garbage in 'R' index
4761 * @param priv
4762 * @param txq
4763 * @param qindex
4764 * @return Number of used entries remaining in the queue
4765 */
4766 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4767 struct clx2_tx_queue *txq, int qindex)
4768 {
4769 u32 hw_tail;
4770 int used;
4771 struct clx2_queue *q = &txq->q;
4772
4773 hw_tail = ipw_read32(priv, q->reg_r);
4774 if (hw_tail >= q->n_bd) {
4775 IPW_ERROR
4776 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4777 hw_tail, q->n_bd);
4778 goto done;
4779 }
4780 for (; q->last_used != hw_tail;
4781 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4782 ipw_queue_tx_free_tfd(priv, txq);
4783 priv->tx_packets++;
4784 }
4785 done:
4786 if ((ipw_queue_space(q) > q->low_mark) &&
4787 (qindex >= 0) &&
4788 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4789 netif_wake_queue(priv->net_dev);
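/* Entries still in flight are the distance from last_used up to
 * first_empty, accounting for wrap-around of the ring. */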
4790 used = q->first_empty - q->last_used;
4791 if (used < 0)
4792 used += q->n_bd;
4793
4794 return used;
4795 }
4796
4797 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4798 int len, int sync)
4799 {
4800 struct clx2_tx_queue *txq = &priv->txq_cmd;
4801 struct clx2_queue *q = &txq->q;
4802 struct tfd_frame *tfd;
4803
4804 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4805 IPW_ERROR("No space for Tx\n");
4806 return -EBUSY;
4807 }
4808
4809 tfd = &txq->bd[q->first_empty];
4810 txq->txb[q->first_empty] = NULL;
4811
4812 memset(tfd, 0, sizeof(*tfd));
4813 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4814 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4815 priv->hcmd_seq++;
4816 tfd->u.cmd.index = hcmd;
4817 tfd->u.cmd.length = len;
4818 memcpy(tfd->u.cmd.payload, buf, len);
4819 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4820 ipw_write32(priv, q->reg_w, q->first_empty);
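/* Read back a register, presumably to flush the queue-write above out of
 * any posted-write buffers before returning. */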
4821 _ipw_read32(priv, 0x90);
4822
4823 return 0;
4824 }
4825
4826 /*
4827 * Rx theory of operation
4828 *
4829 * The host allocates 32 DMA target addresses and passes the host address
4830 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4831 * 0 to 31
4832 *
4833 * Rx Queue Indexes
4834 * The host/firmware share two index registers for managing the Rx buffers.
4835 *
4836 * The READ index maps to the first position that the firmware may be writing
4837 * to -- the driver can read up to (but not including) this position and get
4838 * good data.
4839 * The READ index is managed by the firmware once the card is enabled.
4840 *
4841 * The WRITE index maps to the last position the driver has read from -- the
4842 * position preceding WRITE is the last slot into which the firmware can place a packet.
4843 *
4844 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4845 * WRITE = READ.
4846 *
4847 * During initialization the host sets up the READ queue position to the first
4848 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4849 *
4850 * When the firmware places a packet in a buffer it will advance the READ index
4851 * and fire the RX interrupt. The driver can then query the READ index and
4852 * process as many packets as possible, moving the WRITE index forward as it
4853 * resets the Rx queue buffers with new memory.
4854 *
4855 * The management in the driver is as follows:
4856 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4857 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4858 * to replenish the ipw->rxq->rx_free.
4859 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4860 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4861 * 'processed' and 'read' driver indexes as well)
4862 * + A received packet is processed and handed to the kernel network stack,
4863 * detached from the ipw->rxq. The driver 'processed' index is updated.
4864 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4865 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4866 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4867 * were enough free buffers and RX_STALLED is set it is cleared.
4868 *
4869 *
4870 * Driver sequence:
4871 *
4872 * ipw_rx_queue_alloc() Allocates rx_free
4873 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4874 * ipw_rx_queue_restock
4875 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4876 * queue, updates firmware pointers, and updates
4877 * the WRITE index. If insufficient rx_free buffers
4878 * are available, schedules ipw_rx_queue_replenish
4879 *
4880 * -- enable interrupts --
4881 * ISR - ipw_rx() Detaches ipw_rx_mem_buffers from the pool up to the
4882 * READ INDEX, detaching the SKB from each buffer.
4883 * Moves the packet buffer from the queue to rx_used.
4884 * Calls ipw_rx_queue_restock to refill any empty
4885 * slots.
4886 * ...
4887 *
4888 */
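/*
 * A minimal sketch of the index convention described above, assuming a ring
 * of RX_QUEUE_SIZE slots and modular arithmetic:
 *
 *	queue empty:  WRITE == (READ + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE
 *	queue full:   WRITE == READ
 *
 * ipw_rx_queue_restock() below advances WRITE only while it differs from the
 * driver's 'processed' index.
 */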
4889
4890 /*
4891 * If there are slots in the RX queue that need to be restocked,
4892 * and we have free pre-allocated buffers, fill the ranks as much
4893 * as we can, pulling from rx_free.
4894 *
4895 * This moves the 'write' index forward to catch up with 'processed', and
4896 * also updates the memory address in the firmware to reference the new
4897 * target buffer.
4898 */
4899 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4900 {
4901 struct ipw_rx_queue *rxq = priv->rxq;
4902 struct list_head *element;
4903 struct ipw_rx_mem_buffer *rxb;
4904 unsigned long flags;
4905 int write;
4906
4907 spin_lock_irqsave(&rxq->lock, flags);
4908 write = rxq->write;
4909 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4910 element = rxq->rx_free.next;
4911 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4912 list_del(element);
4913
4914 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4915 rxb->dma_addr);
4916 rxq->queue[rxq->write] = rxb;
4917 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4918 rxq->free_count--;
4919 }
4920 spin_unlock_irqrestore(&rxq->lock, flags);
4921
4922 /* If the pre-allocated buffer pool is dropping low, schedule to
4923 * refill it */
4924 if (rxq->free_count <= RX_LOW_WATERMARK)
4925 queue_work(priv->workqueue, &priv->rx_replenish);
4926
4927 /* If we've added more space for the firmware to place data, tell it */
4928 if (write != rxq->write)
4929 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4930 }
4931
4932 /*
4933 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
4934 * Also restock the Rx queue via ipw_rx_queue_restock.
4935 *
4936 * This is called as a scheduled work item (except for during initialization)
4937 */
4938 static void ipw_rx_queue_replenish(void *data)
4939 {
4940 struct ipw_priv *priv = data;
4941 struct ipw_rx_queue *rxq = priv->rxq;
4942 struct list_head *element;
4943 struct ipw_rx_mem_buffer *rxb;
4944 unsigned long flags;
4945
4946 spin_lock_irqsave(&rxq->lock, flags);
4947 while (!list_empty(&rxq->rx_used)) {
4948 element = rxq->rx_used.next;
4949 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4950 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4951 if (!rxb->skb) {
4952 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4953 priv->net_dev->name);
4954 /* We don't reschedule replenish work here -- we will
4955 * call the restock method and if it still needs
4956 * more buffers it will schedule replenish */
4957 break;
4958 }
4959 list_del(element);
4960
4961 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4962 rxb->dma_addr =
4963 pci_map_single(priv->pci_dev, rxb->skb->data,
4964 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4965
4966 list_add_tail(&rxb->list, &rxq->rx_free);
4967 rxq->free_count++;
4968 }
4969 spin_unlock_irqrestore(&rxq->lock, flags);
4970
4971 ipw_rx_queue_restock(priv);
4972 }
4973
4974 static void ipw_bg_rx_queue_replenish(void *data)
4975 {
4976 struct ipw_priv *priv = data;
4977 down(&priv->sem);
4978 ipw_rx_queue_replenish(data);
4979 up(&priv->sem);
4980 }
4981
4982 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4983 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4984 * This free routine walks the list of POOL entries and, if the SKB is
4985 * non-NULL, unmaps and frees it.
4986 */
4987 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4988 {
4989 int i;
4990
4991 if (!rxq)
4992 return;
4993
4994 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4995 if (rxq->pool[i].skb != NULL) {
4996 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4997 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4998 dev_kfree_skb(rxq->pool[i].skb);
4999 }
5000 }
5001
5002 kfree(rxq);
5003 }
5004
5005 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5006 {
5007 struct ipw_rx_queue *rxq;
5008 int i;
5009
5010 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5011 if (unlikely(!rxq)) {
5012 IPW_ERROR("memory allocation failed\n");
5013 return NULL;
5014 }
5015 spin_lock_init(&rxq->lock);
5016 INIT_LIST_HEAD(&rxq->rx_free);
5017 INIT_LIST_HEAD(&rxq->rx_used);
5018
5019 /* Fill the rx_used queue with _all_ of the Rx buffers */
5020 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5021 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5022
5023 /* Set things up so that we have processed and used all buffers, but
5024 * have not yet restocked the Rx queue with fresh buffers */
5025 rxq->read = rxq->write = 0;
5026 rxq->processed = RX_QUEUE_SIZE - 1;
5027 rxq->free_count = 0;
5028
5029 return rxq;
5030 }
5031
5032 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5033 {
5034 rate &= ~IEEE80211_BASIC_RATE_MASK;
5035 if (ieee_mode == IEEE_A) {
5036 switch (rate) {
5037 case IEEE80211_OFDM_RATE_6MB:
5038 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5039 1 : 0;
5040 case IEEE80211_OFDM_RATE_9MB:
5041 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5042 1 : 0;
5043 case IEEE80211_OFDM_RATE_12MB:
5044 return priv->
5045 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5046 case IEEE80211_OFDM_RATE_18MB:
5047 return priv->
5048 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5049 case IEEE80211_OFDM_RATE_24MB:
5050 return priv->
5051 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5052 case IEEE80211_OFDM_RATE_36MB:
5053 return priv->
5054 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5055 case IEEE80211_OFDM_RATE_48MB:
5056 return priv->
5057 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5058 case IEEE80211_OFDM_RATE_54MB:
5059 return priv->
5060 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5061 default:
5062 return 0;
5063 }
5064 }
5065
5066 /* B and G mixed */
5067 switch (rate) {
5068 case IEEE80211_CCK_RATE_1MB:
5069 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5070 case IEEE80211_CCK_RATE_2MB:
5071 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5072 case IEEE80211_CCK_RATE_5MB:
5073 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5074 case IEEE80211_CCK_RATE_11MB:
5075 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5076 }
5077
5078 /* If we are limited to B modulations, bail at this point */
5079 if (ieee_mode == IEEE_B)
5080 return 0;
5081
5082 /* G */
5083 switch (rate) {
5084 case IEEE80211_OFDM_RATE_6MB:
5085 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5086 case IEEE80211_OFDM_RATE_9MB:
5087 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5088 case IEEE80211_OFDM_RATE_12MB:
5089 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5090 case IEEE80211_OFDM_RATE_18MB:
5091 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5092 case IEEE80211_OFDM_RATE_24MB:
5093 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5094 case IEEE80211_OFDM_RATE_36MB:
5095 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5096 case IEEE80211_OFDM_RATE_48MB:
5097 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5098 case IEEE80211_OFDM_RATE_54MB:
5099 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5100 }
5101
5102 return 0;
5103 }
5104
5105 static int ipw_compatible_rates(struct ipw_priv *priv,
5106 const struct ieee80211_network *network,
5107 struct ipw_supported_rates *rates)
5108 {
5109 int num_rates, i;
5110
5111 memset(rates, 0, sizeof(*rates));
5112 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5113 rates->num_rates = 0;
5114 for (i = 0; i < num_rates; i++) {
5115 if (!ipw_is_rate_in_mask(priv, network->mode,
5116 network->rates[i])) {
5117
5118 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5119 IPW_DEBUG_SCAN("Adding masked mandatory "
5120 "rate %02X\n",
5121 network->rates[i]);
5122 rates->supported_rates[rates->num_rates++] =
5123 network->rates[i];
5124 continue;
5125 }
5126
5127 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5128 network->rates[i], priv->rates_mask);
5129 continue;
5130 }
5131
5132 rates->supported_rates[rates->num_rates++] = network->rates[i];
5133 }
5134
5135 num_rates = min(network->rates_ex_len,
5136 (u8) (IPW_MAX_RATES - num_rates));
5137 for (i = 0; i < num_rates; i++) {
5138 if (!ipw_is_rate_in_mask(priv, network->mode,
5139 network->rates_ex[i])) {
5140 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5141 IPW_DEBUG_SCAN("Adding masked mandatory "
5142 "rate %02X\n",
5143 network->rates_ex[i]);
5144 rates->supported_rates[rates->num_rates++] =
5145 network->rates_ex[i];
5146 continue;
5147 }
5148
5149 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5150 network->rates_ex[i], priv->rates_mask);
5151 continue;
5152 }
5153
5154 rates->supported_rates[rates->num_rates++] =
5155 network->rates_ex[i];
5156 }
5157
5158 return 1;
5159 }
5160
5161 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5162 const struct ipw_supported_rates *src)
5163 {
5164 u8 i;
5165 for (i = 0; i < src->num_rates; i++)
5166 dest->supported_rates[i] = src->supported_rates[i];
5167 dest->num_rates = src->num_rates;
5168 }
5169
5170 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5171 * mask should ever be used -- right now all callers to add the scan rates are
5172 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5173 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5174 u8 modulation, u32 rate_mask)
5175 {
5176 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5177 IEEE80211_BASIC_RATE_MASK : 0;
5178
5179 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5180 rates->supported_rates[rates->num_rates++] =
5181 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5182
5183 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5184 rates->supported_rates[rates->num_rates++] =
5185 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5186
5187 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5188 rates->supported_rates[rates->num_rates++] = basic_mask |
5189 IEEE80211_CCK_RATE_5MB;
5190
5191 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5192 rates->supported_rates[rates->num_rates++] = basic_mask |
5193 IEEE80211_CCK_RATE_11MB;
5194 }
5195
5196 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5197 u8 modulation, u32 rate_mask)
5198 {
5199 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5200 IEEE80211_BASIC_RATE_MASK : 0;
5201
5202 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5203 rates->supported_rates[rates->num_rates++] = basic_mask |
5204 IEEE80211_OFDM_RATE_6MB;
5205
5206 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5207 rates->supported_rates[rates->num_rates++] =
5208 IEEE80211_OFDM_RATE_9MB;
5209
5210 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5211 rates->supported_rates[rates->num_rates++] = basic_mask |
5212 IEEE80211_OFDM_RATE_12MB;
5213
5214 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5215 rates->supported_rates[rates->num_rates++] =
5216 IEEE80211_OFDM_RATE_18MB;
5217
5218 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5219 rates->supported_rates[rates->num_rates++] = basic_mask |
5220 IEEE80211_OFDM_RATE_24MB;
5221
5222 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5223 rates->supported_rates[rates->num_rates++] =
5224 IEEE80211_OFDM_RATE_36MB;
5225
5226 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5227 rates->supported_rates[rates->num_rates++] =
5228 IEEE80211_OFDM_RATE_48MB;
5229
5230 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5231 rates->supported_rates[rates->num_rates++] =
5232 IEEE80211_OFDM_RATE_54MB;
5233 }
5234
5235 struct ipw_network_match {
5236 struct ieee80211_network *network;
5237 struct ipw_supported_rates rates;
5238 };
5239
5240 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5241 struct ipw_network_match *match,
5242 struct ieee80211_network *network,
5243 int roaming)
5244 {
5245 struct ipw_supported_rates rates;
5246
5247 /* Verify that this network's capability is compatible with the
5248 * current mode (AdHoc or Infrastructure) */
5249 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5250 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5251 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5252 "capability mismatch.\n",
5253 escape_essid(network->ssid, network->ssid_len),
5254 MAC_ARG(network->bssid));
5255 return 0;
5256 }
5257
5258 /* If we do not have an ESSID for this AP, we can not associate with
5259 * it */
5260 if (network->flags & NETWORK_EMPTY_ESSID) {
5261 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5262 "because of hidden ESSID.\n",
5263 escape_essid(network->ssid, network->ssid_len),
5264 MAC_ARG(network->bssid));
5265 return 0;
5266 }
5267
5268 if (unlikely(roaming)) {
5269 /* If we are roaming, make sure this is a valid network to
5270 * try to roam to */
5271 if ((network->ssid_len != match->network->ssid_len) ||
5272 memcmp(network->ssid, match->network->ssid,
5273 network->ssid_len)) {
5274 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5275 "because of non-network ESSID.\n",
5276 escape_essid(network->ssid,
5277 network->ssid_len),
5278 MAC_ARG(network->bssid));
5279 return 0;
5280 }
5281 } else {
5282 /* If an ESSID has been configured then compare the broadcast
5283 * ESSID to ours */
5284 if ((priv->config & CFG_STATIC_ESSID) &&
5285 ((network->ssid_len != priv->essid_len) ||
5286 memcmp(network->ssid, priv->essid,
5287 min(network->ssid_len, priv->essid_len)))) {
5288 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5289
5290 strncpy(escaped,
5291 escape_essid(network->ssid, network->ssid_len),
5292 sizeof(escaped));
5293 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5294 "because of ESSID mismatch: '%s'.\n",
5295 escaped, MAC_ARG(network->bssid),
5296 escape_essid(priv->essid,
5297 priv->essid_len));
5298 return 0;
5299 }
5300 }
5301
5302 /* If the currently matched network is preferable to this one (based
5303 * on the beacon time stamps), don't bother testing everything else. */
5304
5305 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5306 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5307 "current network.\n",
5308 escape_essid(match->network->ssid,
5309 match->network->ssid_len));
5310 return 0;
5311 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5312 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5313 "current network.\n",
5314 escape_essid(match->network->ssid,
5315 match->network->ssid_len));
5316 return 0;
5317 }
5318
5319 /* Now go through and see if the requested network is valid... */
5320 if (priv->ieee->scan_age != 0 &&
5321 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5322 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5323 "because of age: %lums.\n",
5324 escape_essid(network->ssid, network->ssid_len),
5325 MAC_ARG(network->bssid),
5326 1000 * (jiffies - network->last_scanned) / HZ);
5327 return 0;
5328 }
5329
5330 if ((priv->config & CFG_STATIC_CHANNEL) &&
5331 (network->channel != priv->channel)) {
5332 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5333 "because of channel mismatch: %d != %d.\n",
5334 escape_essid(network->ssid, network->ssid_len),
5335 MAC_ARG(network->bssid),
5336 network->channel, priv->channel);
5337 return 0;
5338 }
5339
5340 /* Verify privacy compatibility */
5341 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5342 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5343 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5344 "because of privacy mismatch: %s != %s.\n",
5345 escape_essid(network->ssid, network->ssid_len),
5346 MAC_ARG(network->bssid),
5347 priv->
5348 capability & CAP_PRIVACY_ON ? "on" : "off",
5349 network->
5350 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5351 "off");
5352 return 0;
5353 }
5354
5355 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5356 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5357 "because of the same BSSID match: " MAC_FMT
5358 ".\n", escape_essid(network->ssid,
5359 network->ssid_len),
5360 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5361 return 0;
5362 }
5363
5364 /* Filter out any incompatible freq / mode combinations */
5365 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5366 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5367 "because of invalid frequency/mode "
5368 "combination.\n",
5369 escape_essid(network->ssid, network->ssid_len),
5370 MAC_ARG(network->bssid));
5371 return 0;
5372 }
5373
5374 /* Ensure that the rates supported by the driver are compatible with
5375 * this AP, including verification of basic rates (mandatory) */
5376 if (!ipw_compatible_rates(priv, network, &rates)) {
5377 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5378 "because configured rate mask excludes "
5379 "AP mandatory rate.\n",
5380 escape_essid(network->ssid, network->ssid_len),
5381 MAC_ARG(network->bssid));
5382 return 0;
5383 }
5384
5385 if (rates.num_rates == 0) {
5386 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5387 "because of no compatible rates.\n",
5388 escape_essid(network->ssid, network->ssid_len),
5389 MAC_ARG(network->bssid));
5390 return 0;
5391 }
5392
5393 /* TODO: Perform any further minimal comparative tests. We do not
5394 * want to put too much policy logic here; intelligent scan selection
5395 * should occur within a generic IEEE 802.11 user space tool. */
5396
5397 /* Set up 'new' AP to this network */
5398 ipw_copy_rates(&match->rates, &rates);
5399 match->network = network;
5400 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5401 escape_essid(network->ssid, network->ssid_len),
5402 MAC_ARG(network->bssid));
5403
5404 return 1;
5405 }
5406
5407 static void ipw_merge_adhoc_network(void *data)
5408 {
5409 struct ipw_priv *priv = data;
5410 struct ieee80211_network *network = NULL;
5411 struct ipw_network_match match = {
5412 .network = priv->assoc_network
5413 };
5414
5415 if ((priv->status & STATUS_ASSOCIATED) &&
5416 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5417 /* First pass through ROAM process -- look for a better
5418 * network */
5419 unsigned long flags;
5420
5421 spin_lock_irqsave(&priv->ieee->lock, flags);
5422 list_for_each_entry(network, &priv->ieee->network_list, list) {
5423 if (network != priv->assoc_network)
5424 ipw_find_adhoc_network(priv, &match, network,
5425 1);
5426 }
5427 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5428
5429 if (match.network == priv->assoc_network) {
5430 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5431 "merge to.\n");
5432 return;
5433 }
5434
5435 down(&priv->sem);
5436 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5437 IPW_DEBUG_MERGE("remove network %s\n",
5438 escape_essid(priv->essid,
5439 priv->essid_len));
5440 ipw_remove_current_network(priv);
5441 }
5442
5443 ipw_disassociate(priv);
5444 priv->assoc_network = match.network;
5445 up(&priv->sem);
5446 return;
5447 }
5448 }
5449
5450 static int ipw_best_network(struct ipw_priv *priv,
5451 struct ipw_network_match *match,
5452 struct ieee80211_network *network, int roaming)
5453 {
5454 struct ipw_supported_rates rates;
5455
5456 /* Verify that this network's capability is compatible with the
5457 * current mode (AdHoc or Infrastructure) */
5458 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5459 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5460 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5461 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5462 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5463 "capability mismatch.\n",
5464 escape_essid(network->ssid, network->ssid_len),
5465 MAC_ARG(network->bssid));
5466 return 0;
5467 }
5468
5469 /* If we do not have an ESSID for this AP, we can not associate with
5470 * it */
5471 if (network->flags & NETWORK_EMPTY_ESSID) {
5472 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5473 "because of hidden ESSID.\n",
5474 escape_essid(network->ssid, network->ssid_len),
5475 MAC_ARG(network->bssid));
5476 return 0;
5477 }
5478
5479 if (unlikely(roaming)) {
5480 /* If we are roaming, make sure this is a valid network to
5481 * try to roam to */
5482 if ((network->ssid_len != match->network->ssid_len) ||
5483 memcmp(network->ssid, match->network->ssid,
5484 network->ssid_len)) {
5485 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5486 "because of non-network ESSID.\n",
5487 escape_essid(network->ssid,
5488 network->ssid_len),
5489 MAC_ARG(network->bssid));
5490 return 0;
5491 }
5492 } else {
5493 /* If an ESSID has been configured then compare the broadcast
5494 * ESSID to ours */
5495 if ((priv->config & CFG_STATIC_ESSID) &&
5496 ((network->ssid_len != priv->essid_len) ||
5497 memcmp(network->ssid, priv->essid,
5498 min(network->ssid_len, priv->essid_len)))) {
5499 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5500 strncpy(escaped,
5501 escape_essid(network->ssid, network->ssid_len),
5502 sizeof(escaped));
5503 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5504 "because of ESSID mismatch: '%s'.\n",
5505 escaped, MAC_ARG(network->bssid),
5506 escape_essid(priv->essid,
5507 priv->essid_len));
5508 return 0;
5509 }
5510 }
5511
5512 /* If the currently matched network has a stronger signal than this
5513 * one, don't bother testing everything else. */
5514 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5515 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5516 strncpy(escaped,
5517 escape_essid(network->ssid, network->ssid_len),
5518 sizeof(escaped));
5519 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5520 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5521 escaped, MAC_ARG(network->bssid),
5522 escape_essid(match->network->ssid,
5523 match->network->ssid_len),
5524 MAC_ARG(match->network->bssid));
5525 return 0;
5526 }
5527
5528 /* If this network has already had an association attempt within the
5529 * last 3 seconds, do not try to associate again... */
5530 if (network->last_associate &&
5531 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5532 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5533 "because of storming (%lus since last "
5534 "assoc attempt).\n",
5535 escape_essid(network->ssid, network->ssid_len),
5536 MAC_ARG(network->bssid),
5537 (jiffies - network->last_associate) / HZ);
5538 return 0;
5539 }
5540
5541 /* Now go through and see if the requested network is valid... */
5542 if (priv->ieee->scan_age != 0 &&
5543 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5544 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5545 "because of age: %lums.\n",
5546 escape_essid(network->ssid, network->ssid_len),
5547 MAC_ARG(network->bssid),
5548 1000 * (jiffies - network->last_scanned) / HZ);
5549 return 0;
5550 }
5551
5552 if ((priv->config & CFG_STATIC_CHANNEL) &&
5553 (network->channel != priv->channel)) {
5554 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5555 "because of channel mismatch: %d != %d.\n",
5556 escape_essid(network->ssid, network->ssid_len),
5557 MAC_ARG(network->bssid),
5558 network->channel, priv->channel);
5559 return 0;
5560 }
5561
5562 /* Verify privacy compatibility */
5563 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5564 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5565 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5566 "because of privacy mismatch: %s != %s.\n",
5567 escape_essid(network->ssid, network->ssid_len),
5568 MAC_ARG(network->bssid),
5569 priv->capability & CAP_PRIVACY_ON ? "on" :
5570 "off",
5571 network->capability &
5572 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5573 return 0;
5574 }
5575
5576 if (!priv->ieee->wpa_enabled && (network->wpa_ie_len > 0 ||
5577 network->rsn_ie_len > 0)) {
5578 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5579 "because of WPA capability mismatch.\n",
5580 escape_essid(network->ssid, network->ssid_len),
5581 MAC_ARG(network->bssid));
5582 return 0;
5583 }
5584
5585 if ((priv->config & CFG_STATIC_BSSID) &&
5586 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5587 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5588 "because of BSSID mismatch: " MAC_FMT ".\n",
5589 escape_essid(network->ssid, network->ssid_len),
5590 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5591 return 0;
5592 }
5593
5594 /* Filter out any incompatible freq / mode combinations */
5595 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5596 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5597 "because of invalid frequency/mode "
5598 "combination.\n",
5599 escape_essid(network->ssid, network->ssid_len),
5600 MAC_ARG(network->bssid));
5601 return 0;
5602 }
5603
5604 /* Filter out invalid channel in current GEO */
5605 if (!ipw_is_valid_channel(priv->ieee, network->channel)) {
5606 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5607 "because of invalid channel in current GEO\n",
5608 escape_essid(network->ssid, network->ssid_len),
5609 MAC_ARG(network->bssid));
5610 return 0;
5611 }
5612
5613 /* Ensure that the rates supported by the driver are compatible with
5614 * this AP, including verification of basic rates (mandatory) */
5615 if (!ipw_compatible_rates(priv, network, &rates)) {
5616 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5617 "because configured rate mask excludes "
5618 "AP mandatory rate.\n",
5619 escape_essid(network->ssid, network->ssid_len),
5620 MAC_ARG(network->bssid));
5621 return 0;
5622 }
5623
5624 if (rates.num_rates == 0) {
5625 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5626 "because of no compatible rates.\n",
5627 escape_essid(network->ssid, network->ssid_len),
5628 MAC_ARG(network->bssid));
5629 return 0;
5630 }
5631
5632 /* TODO: Perform any further minimal comparative tests. We do not
5633 * want to put too much policy logic here; intelligent scan selection
5634 * should occur within a generic IEEE 802.11 user space tool. */
5635
5636 /* Set up 'new' AP to this network */
5637 ipw_copy_rates(&match->rates, &rates);
5638 match->network = network;
5639
5640 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5641 escape_essid(network->ssid, network->ssid_len),
5642 MAC_ARG(network->bssid));
5643
5644 return 1;
5645 }
5646
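/* Build an ieee80211_network entry describing the IBSS we are about to
 * create: pick a valid band/channel, generate a BSSID, and fill in the
 * SSID, capability, rate and timing fields from the current settings. */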
5647 static void ipw_adhoc_create(struct ipw_priv *priv,
5648 struct ieee80211_network *network)
5649 {
5650 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
5651 int i;
5652
5653 /*
5654 * For the purposes of scanning, we can set our wireless mode
5655 * to trigger scans across combinations of bands, but when it
5656 * comes to creating a new ad-hoc network, we have to tell the FW
5657 * exactly which band to use.
5658 *
5659 * We also have the possibility of an invalid channel for the
5660 * chosen band. Attempting to create a new ad-hoc network
5661 * with an invalid channel for wireless mode will trigger a
5662 * FW fatal error.
5663 *
5664 */
5665 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
5666 case IEEE80211_52GHZ_BAND:
5667 network->mode = IEEE_A;
5668 i = ipw_channel_to_index(priv->ieee, priv->channel);
5669 if (i == -1)
5670 BUG();
5671 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5672 IPW_WARNING("Overriding invalid channel\n");
5673 priv->channel = geo->a[0].channel;
5674 }
5675 break;
5676
5677 case IEEE80211_24GHZ_BAND:
5678 if (priv->ieee->mode & IEEE_G)
5679 network->mode = IEEE_G;
5680 else
5681 network->mode = IEEE_B;
5682 i = ipw_channel_to_index(priv->ieee, priv->channel);
5683 if (i == -1)
5684 BUG();
5685 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5686 IPW_WARNING("Overriding invalid channel\n");
5687 priv->channel = geo->bg[0].channel;
5688 }
5689 break;
5690
5691 default:
5692 IPW_WARNING("Overriding invalid channel\n");
5693 if (priv->ieee->mode & IEEE_A) {
5694 network->mode = IEEE_A;
5695 priv->channel = geo->a[0].channel;
5696 } else if (priv->ieee->mode & IEEE_G) {
5697 network->mode = IEEE_G;
5698 priv->channel = geo->bg[0].channel;
5699 } else {
5700 network->mode = IEEE_B;
5701 priv->channel = geo->bg[0].channel;
5702 }
5703 break;
5704 }
5705
5706 network->channel = priv->channel;
5707 priv->config |= CFG_ADHOC_PERSIST;
5708 ipw_create_bssid(priv, network->bssid);
5709 network->ssid_len = priv->essid_len;
5710 memcpy(network->ssid, priv->essid, priv->essid_len);
5711 memset(&network->stats, 0, sizeof(network->stats));
5712 network->capability = WLAN_CAPABILITY_IBSS;
5713 if (!(priv->config & CFG_PREAMBLE_LONG))
5714 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5715 if (priv->capability & CAP_PRIVACY_ON)
5716 network->capability |= WLAN_CAPABILITY_PRIVACY;
5717 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5718 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5719 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5720 memcpy(network->rates_ex,
5721 &priv->rates.supported_rates[network->rates_len],
5722 network->rates_ex_len);
5723 network->last_scanned = 0;
5724 network->flags = 0;
5725 network->last_associate = 0;
5726 network->time_stamp[0] = 0;
5727 network->time_stamp[1] = 0;
5728 network->beacon_interval = 100; /* Default */
5729 network->listen_interval = 10; /* Default */
5730 network->atim_window = 0; /* Default */
5731 network->wpa_ie_len = 0;
5732 network->rsn_ie_len = 0;
5733 }
5734
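/* Upload the temporal key for the given key index to the firmware via
 * the TGI_TX_KEY host command; 'type' selects TKIP or CCMP handling. */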
5735 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5736 {
5737 struct ipw_tgi_tx_key *key;
5738 struct host_cmd cmd = {
5739 .cmd = IPW_CMD_TGI_TX_KEY,
5740 .len = sizeof(*key)
5741 };
5742
5743 if (!(priv->ieee->sec.flags & (1 << index)))
5744 return;
5745
5746 key = (struct ipw_tgi_tx_key *)&cmd.param;
5747 key->key_id = index;
5748 memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5749 key->security_type = type;
5750 key->station_index = 0; /* always 0 for BSS */
5751 key->flags = 0;
5752 /* 0 for new key; previous value of counter (after fatal error) */
5753 key->tx_counter[0] = 0;
5754 key->tx_counter[1] = 0;
5755
5756 ipw_send_cmd(priv, &cmd);
5757 }
5758
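/* Walk the four WEP-style key slots and send each configured key to the
 * firmware as a DINO WEP_KEY command; empty slots are skipped. 'type'
 * selects the security type the firmware applies to the keys. */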
5759 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5760 {
5761 struct ipw_wep_key *key;
5762 int i;
5763 struct host_cmd cmd = {
5764 .cmd = IPW_CMD_WEP_KEY,
5765 .len = sizeof(*key)
5766 };
5767
5768 key = (struct ipw_wep_key *)&cmd.param;
5769 key->cmd_id = DINO_CMD_WEP_KEY;
5770 key->seq_num = 0;
5771
5772 /* Note: AES keys cannot be set multiple times.
5773 * Only set them the first time. */
5774 for (i = 0; i < 4; i++) {
5775 key->key_index = i | type;
5776 if (!(priv->ieee->sec.flags & (1 << i))) {
5777 key->key_size = 0;
5778 continue;
5779 }
5780
5781 key->key_size = priv->ieee->sec.key_sizes[i];
5782 memcpy(key->key, priv->ieee->sec.keys[i], key->key_size);
5783
5784 ipw_send_cmd(priv, &cmd);
5785 }
5786 }
5787
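/* Choose hardware or host decryption of unicast frames for the given
 * security level; TKIP (SEC_LEVEL_2) is left to the host since the HW
 * cannot handle the MIC (see ipw_wx_set_encodeext). */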
5788 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5789 {
5790 if (priv->ieee->host_encrypt)
5791 return;
5792
5793 switch (level) {
5794 case SEC_LEVEL_3:
5795 priv->sys_config.disable_unicast_decryption = 0;
5796 priv->ieee->host_decrypt = 0;
5797 break;
5798 case SEC_LEVEL_2:
5799 priv->sys_config.disable_unicast_decryption = 1;
5800 priv->ieee->host_decrypt = 1;
5801 break;
5802 case SEC_LEVEL_1:
5803 priv->sys_config.disable_unicast_decryption = 0;
5804 priv->ieee->host_decrypt = 0;
5805 break;
5806 case SEC_LEVEL_0:
5807 priv->sys_config.disable_unicast_decryption = 1;
5808 break;
5809 default:
5810 break;
5811 }
5812 }
5813
5814 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5815 {
5816 if (priv->ieee->host_encrypt)
5817 return;
5818
5819 switch (level) {
5820 case SEC_LEVEL_3:
5821 priv->sys_config.disable_multicast_decryption = 0;
5822 break;
5823 case SEC_LEVEL_2:
5824 priv->sys_config.disable_multicast_decryption = 1;
5825 break;
5826 case SEC_LEVEL_1:
5827 priv->sys_config.disable_multicast_decryption = 0;
5828 break;
5829 case SEC_LEVEL_0:
5830 priv->sys_config.disable_multicast_decryption = 1;
5831 break;
5832 default:
5833 break;
5834 }
5835 }
5836
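/* Program the firmware with the currently configured keys according to
 * the negotiated security level (CCMP, TKIP or WEP). */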
5837 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5838 {
5839 switch (priv->ieee->sec.level) {
5840 case SEC_LEVEL_3:
5841 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5842 ipw_send_tgi_tx_key(priv,
5843 DCT_FLAG_EXT_SECURITY_CCM,
5844 priv->ieee->sec.active_key);
5845
5846 if (!priv->ieee->host_mc_decrypt)
5847 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5848 break;
5849 case SEC_LEVEL_2:
5850 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5851 ipw_send_tgi_tx_key(priv,
5852 DCT_FLAG_EXT_SECURITY_TKIP,
5853 priv->ieee->sec.active_key);
5854 break;
5855 case SEC_LEVEL_1:
5856 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5857 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5858 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5859 break;
5860 case SEC_LEVEL_0:
5861 default:
5862 break;
5863 }
5864 }
5865
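/* Periodic IBSS watchdog: disassociate once the missed-beacon count
 * exceeds the threshold, unless CFG_ADHOC_PERSIST is set; otherwise
 * re-arm for the next beacon interval. */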
5866 static void ipw_adhoc_check(void *data)
5867 {
5868 struct ipw_priv *priv = data;
5869
5870 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5871 !(priv->config & CFG_ADHOC_PERSIST)) {
5872 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5873 IPW_DL_STATE | IPW_DL_ASSOC,
5874 "Missed beacon: %d - disassociate\n",
5875 priv->missed_adhoc_beacons);
5876 ipw_remove_current_network(priv);
5877 ipw_disassociate(priv);
5878 return;
5879 }
5880
5881 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5882 priv->assoc_request.beacon_interval);
5883 }
5884
5885 static void ipw_bg_adhoc_check(void *data)
5886 {
5887 struct ipw_priv *priv = data;
5888 down(&priv->sem);
5889 ipw_adhoc_check(data);
5890 up(&priv->sem);
5891 }
5892
5893 #ifdef CONFIG_IPW2200_DEBUG
5894 static void ipw_debug_config(struct ipw_priv *priv)
5895 {
5896 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5897 "[CFG 0x%08X]\n", priv->config);
5898 if (priv->config & CFG_STATIC_CHANNEL)
5899 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5900 else
5901 IPW_DEBUG_INFO("Channel unlocked.\n");
5902 if (priv->config & CFG_STATIC_ESSID)
5903 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5904 escape_essid(priv->essid, priv->essid_len));
5905 else
5906 IPW_DEBUG_INFO("ESSID unlocked.\n");
5907 if (priv->config & CFG_STATIC_BSSID)
5908 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5909 MAC_ARG(priv->bssid));
5910 else
5911 IPW_DEBUG_INFO("BSSID unlocked.\n");
5912 if (priv->capability & CAP_PRIVACY_ON)
5913 IPW_DEBUG_INFO("PRIVACY on\n");
5914 else
5915 IPW_DEBUG_INFO("PRIVACY off\n");
5916 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5917 }
5918 #else
5919 #define ipw_debug_config(x) do {} while (0)
5920 #endif
5921
5922 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5923 {
5924 /* TODO: Verify that this works... */
5925 struct ipw_fixed_rate fr = {
5926 .tx_rates = priv->rates_mask
5927 };
5928 u32 reg;
5929 u16 mask = 0;
5930
5931 /* Identify 'current FW band' and match it with the fixed
5932 * Tx rates */
5933
5934 switch (priv->ieee->freq_band) {
5935 case IEEE80211_52GHZ_BAND: /* A only */
5936 /* IEEE_A */
5937 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5938 /* Invalid fixed rate mask */
5939 IPW_DEBUG_WX
5940 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5941 fr.tx_rates = 0;
5942 break;
5943 }
5944
5945 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5946 break;
5947
5948 default: /* 2.4Ghz or Mixed */
5949 /* IEEE_B */
5950 if (mode == IEEE_B) {
5951 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5952 /* Invalid fixed rate mask */
5953 IPW_DEBUG_WX
5954 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5955 fr.tx_rates = 0;
5956 }
5957 break;
5958 }
5959
5960 /* IEEE_G */
5961 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5962 IEEE80211_OFDM_RATES_MASK)) {
5963 /* Invalid fixed rate mask */
5964 IPW_DEBUG_WX
5965 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5966 fr.tx_rates = 0;
5967 break;
5968 }
5969
5970 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5971 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5972 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5973 }
5974
5975 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5976 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5977 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5978 }
5979
5980 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5981 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5982 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5983 }
5984
5985 fr.tx_rates |= mask;
5986 break;
5987 }
5988
5989 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
5990 ipw_write_reg32(priv, reg, *(u32 *) & fr);
5991 }
5992
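/* Ask the firmware to abort an in-progress scan, ignoring the request
 * if an abort is already pending. */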
5993 static void ipw_abort_scan(struct ipw_priv *priv)
5994 {
5995 int err;
5996
5997 if (priv->status & STATUS_SCAN_ABORTING) {
5998 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5999 return;
6000 }
6001 priv->status |= STATUS_SCAN_ABORTING;
6002
6003 err = ipw_send_scan_abort(priv);
6004 if (err)
6005 IPW_DEBUG_HC("Request to abort scan failed.\n");
6006 }
6007
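/* Fill in the channel list of a scan request. Channels are grouped per
 * band, with the first entry of each group encoding the band (IPW_A_MODE
 * or IPW_B_MODE in the top bits) and the number of channels that follow;
 * passive-only channels are forced to a passive scan type. */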
6008 static void ipw_add_scan_channels(struct ipw_priv *priv,
6009 struct ipw_scan_request_ext *scan,
6010 int scan_type)
6011 {
6012 int channel_index = 0;
6013 const struct ieee80211_geo *geo;
6014 int i;
6015
6016 geo = ipw_get_geo(priv->ieee);
6017
6018 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6019 int start = channel_index;
6020 for (i = 0; i < geo->a_channels; i++) {
6021 if ((priv->status & STATUS_ASSOCIATED) &&
6022 geo->a[i].channel == priv->channel)
6023 continue;
6024 channel_index++;
6025 scan->channels_list[channel_index] = geo->a[i].channel;
6026 ipw_set_scan_type(scan, channel_index,
6027 geo->a[i].
6028 flags & IEEE80211_CH_PASSIVE_ONLY ?
6029 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6030 scan_type);
6031 }
6032
6033 if (start != channel_index) {
6034 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6035 (channel_index - start);
6036 channel_index++;
6037 }
6038 }
6039
6040 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6041 int start = channel_index;
6042 if (priv->config & CFG_SPEED_SCAN) {
6043 int index;
6044 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6045 /* nop out the list */
6046 [0] = 0
6047 };
6048
6049 u8 channel;
6050 while (channel_index < IPW_SCAN_CHANNELS) {
6051 channel =
6052 priv->speed_scan[priv->speed_scan_pos];
6053 if (channel == 0) {
6054 priv->speed_scan_pos = 0;
6055 channel = priv->speed_scan[0];
6056 }
6057 if ((priv->status & STATUS_ASSOCIATED) &&
6058 channel == priv->channel) {
6059 priv->speed_scan_pos++;
6060 continue;
6061 }
6062
6063 /* If this channel has already been
6064 * added to the scan, break from the
6065 * loop; it will be the first channel
6066 * in the next scan.
6067 */
6068 if (channels[channel - 1] != 0)
6069 break;
6070
6071 channels[channel - 1] = 1;
6072 priv->speed_scan_pos++;
6073 channel_index++;
6074 scan->channels_list[channel_index] = channel;
6075 index =
6076 ipw_channel_to_index(priv->ieee, channel);
6077 ipw_set_scan_type(scan, channel_index,
6078 geo->bg[index].
6079 flags &
6080 IEEE80211_CH_PASSIVE_ONLY ?
6081 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6082 : scan_type);
6083 }
6084 } else {
6085 for (i = 0; i < geo->bg_channels; i++) {
6086 if ((priv->status & STATUS_ASSOCIATED) &&
6087 geo->bg[i].channel == priv->channel)
6088 continue;
6089 channel_index++;
6090 scan->channels_list[channel_index] =
6091 geo->bg[i].channel;
6092 ipw_set_scan_type(scan, channel_index,
6093 geo->bg[i].
6094 flags &
6095 IEEE80211_CH_PASSIVE_ONLY ?
6096 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6097 : scan_type);
6098 }
6099 }
6100
6101 if (start != channel_index) {
6102 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6103 (channel_index - start);
6104 }
6105 }
6106 }
6107
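/* Build and send a scan request, unless a scan is already running, an
 * abort is pending or RF-kill is active (in which case the scan is
 * deferred via STATUS_SCAN_PENDING). */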
6108 static int ipw_request_scan(struct ipw_priv *priv)
6109 {
6110 struct ipw_scan_request_ext scan;
6111 int err = 0, scan_type;
6112
6113 if (!(priv->status & STATUS_INIT) ||
6114 (priv->status & STATUS_EXIT_PENDING))
6115 return 0;
6116
6117 down(&priv->sem);
6118
6119 if (priv->status & STATUS_SCANNING) {
6120 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6121 priv->status |= STATUS_SCAN_PENDING;
6122 goto done;
6123 }
6124
6125 if (!(priv->status & STATUS_SCAN_FORCED) &&
6126 priv->status & STATUS_SCAN_ABORTING) {
6127 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6128 priv->status |= STATUS_SCAN_PENDING;
6129 goto done;
6130 }
6131
6132 if (priv->status & STATUS_RF_KILL_MASK) {
6133 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6134 priv->status |= STATUS_SCAN_PENDING;
6135 goto done;
6136 }
6137
6138 memset(&scan, 0, sizeof(scan));
6139
6140 if (priv->config & CFG_SPEED_SCAN)
6141 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6142 cpu_to_le16(30);
6143 else
6144 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6145 cpu_to_le16(20);
6146
6147 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6148 cpu_to_le16(20);
6149 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6150
6151 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6152
6153 #ifdef CONFIG_IPW2200_MONITOR
6154 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6155 u8 channel;
6156 u8 band = 0;
6157
6158 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
6159 case IEEE80211_52GHZ_BAND:
6160 band = (u8) (IPW_A_MODE << 6) | 1;
6161 channel = priv->channel;
6162 break;
6163
6164 case IEEE80211_24GHZ_BAND:
6165 band = (u8) (IPW_B_MODE << 6) | 1;
6166 channel = priv->channel;
6167 break;
6168
6169 default:
6170 band = (u8) (IPW_B_MODE << 6) | 1;
6171 channel = 9;
6172 break;
6173 }
6174
6175 scan.channels_list[0] = band;
6176 scan.channels_list[1] = channel;
6177 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6178
6179 /* NOTE: The card will sit on this channel for this time
6180 * period. Scan aborts are timing sensitive and frequently
6181 * result in firmware restarts. As such, it is best to
6182 * set a small dwell_time here and just keep re-issuing
6183 * scans. Otherwise fast channel hopping will not actually
6184 * hop channels.
6185 *
6186 * TODO: Move SPEED SCAN support to all modes and bands */
6187 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6188 cpu_to_le16(2000);
6189 } else {
6190 #endif /* CONFIG_IPW2200_MONITOR */
6191 /* If we are roaming, then make this a directed scan for the
6192 * current network. Otherwise, ensure that every other scan
6193 * is a fast channel hop scan */
6194 if ((priv->status & STATUS_ROAMING)
6195 || (!(priv->status & STATUS_ASSOCIATED)
6196 && (priv->config & CFG_STATIC_ESSID)
6197 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6198 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6199 if (err) {
6200 IPW_DEBUG_HC("Attempt to send SSID command "
6201 "failed.\n");
6202 goto done;
6203 }
6204
6205 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6206 } else
6207 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6208
6209 ipw_add_scan_channels(priv, &scan, scan_type);
6210 #ifdef CONFIG_IPW2200_MONITOR
6211 }
6212 #endif
6213
6214 err = ipw_send_scan_request_ext(priv, &scan);
6215 if (err) {
6216 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6217 goto done;
6218 }
6219
6220 priv->status |= STATUS_SCANNING;
6221 priv->status &= ~STATUS_SCAN_PENDING;
6222 queue_delayed_work(priv->workqueue, &priv->scan_check,
6223 IPW_SCAN_CHECK_WATCHDOG);
6224 done:
6225 up(&priv->sem);
6226 return err;
6227 }
6228
6229 static void ipw_bg_abort_scan(void *data)
6230 {
6231 struct ipw_priv *priv = data;
6232 down(&priv->sem);
6233 ipw_abort_scan(data);
6234 up(&priv->sem);
6235 }
6236
6237 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6238 {
6239 /* This is called when wpa_supplicant loads and closes the driver
6240 * interface. */
6241 priv->ieee->wpa_enabled = value;
6242 return 0;
6243 }
6244
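/* Map the WE-18 IW_AUTH_ALG_* flags (open system, shared key, LEAP) to
 * the ieee80211 security auth_mode and push it via set_security(). */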
6245 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6246 {
6247 struct ieee80211_device *ieee = priv->ieee;
6248 struct ieee80211_security sec = {
6249 .flags = SEC_AUTH_MODE,
6250 };
6251 int ret = 0;
6252
6253 if (value & IW_AUTH_ALG_SHARED_KEY) {
6254 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6255 ieee->open_wep = 0;
6256 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6257 sec.auth_mode = WLAN_AUTH_OPEN;
6258 ieee->open_wep = 1;
6259 } else if (value & IW_AUTH_ALG_LEAP) {
6260 sec.auth_mode = WLAN_AUTH_LEAP;
6261 ieee->open_wep = 1;
6262 } else
6263 return -EINVAL;
6264
6265 if (ieee->set_security)
6266 ieee->set_security(ieee->dev, &sec);
6267 else
6268 ret = -EOPNOTSUPP;
6269
6270 return ret;
6271 }
6272
6273 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6274 int wpa_ie_len)
6275 {
6276 /* make sure WPA is enabled */
6277 ipw_wpa_enable(priv, 1);
6278
6279 ipw_disassociate(priv);
6280 }
6281
6282 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6283 char *capabilities, int length)
6284 {
6285 struct host_cmd cmd = {
6286 .cmd = IPW_CMD_RSN_CAPABILITIES,
6287 .len = length,
6288 };
6289
6290 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6291
6292 memcpy(cmd.param, capabilities, length);
6293 return ipw_send_cmd(priv, &cmd);
6294 }
6295
6296 /*
6297 * WE-18 support
6298 */
6299
6300 /* SIOCSIWGENIE */
6301 static int ipw_wx_set_genie(struct net_device *dev,
6302 struct iw_request_info *info,
6303 union iwreq_data *wrqu, char *extra)
6304 {
6305 struct ipw_priv *priv = ieee80211_priv(dev);
6306 struct ieee80211_device *ieee = priv->ieee;
6307 u8 *buf;
6308 int err = 0;
6309
6310 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6311 (wrqu->data.length && extra == NULL))
6312 return -EINVAL;
6313
6314 //down(&priv->sem);
6315
6316 //if (!ieee->wpa_enabled) {
6317 // err = -EOPNOTSUPP;
6318 // goto out;
6319 //}
6320
6321 if (wrqu->data.length) {
6322 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6323 if (buf == NULL) {
6324 err = -ENOMEM;
6325 goto out;
6326 }
6327
6328 memcpy(buf, extra, wrqu->data.length);
6329 kfree(ieee->wpa_ie);
6330 ieee->wpa_ie = buf;
6331 ieee->wpa_ie_len = wrqu->data.length;
6332 } else {
6333 kfree(ieee->wpa_ie);
6334 ieee->wpa_ie = NULL;
6335 ieee->wpa_ie_len = 0;
6336 }
6337
6338 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6339 out:
6340 //up(&priv->sem);
6341 return err;
6342 }
6343
6344 /* SIOCGIWGENIE */
6345 static int ipw_wx_get_genie(struct net_device *dev,
6346 struct iw_request_info *info,
6347 union iwreq_data *wrqu, char *extra)
6348 {
6349 struct ipw_priv *priv = ieee80211_priv(dev);
6350 struct ieee80211_device *ieee = priv->ieee;
6351 int err = 0;
6352
6353 //down(&priv->sem);
6354
6355 //if (!ieee->wpa_enabled) {
6356 // err = -EOPNOTSUPP;
6357 // goto out;
6358 //}
6359
6360 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6361 wrqu->data.length = 0;
6362 goto out;
6363 }
6364
6365 if (wrqu->data.length < ieee->wpa_ie_len) {
6366 err = -E2BIG;
6367 goto out;
6368 }
6369
6370 wrqu->data.length = ieee->wpa_ie_len;
6371 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6372
6373 out:
6374 //up(&priv->sem);
6375 return err;
6376 }
6377
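/* Translate an IW_AUTH_CIPHER_* value into the driver's SEC_LEVEL_*
 * scale (none, WEP, TKIP, CCMP); returns -1 for unknown ciphers. */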
6378 static int wext_cipher2level(int cipher)
6379 {
6380 switch (cipher) {
6381 case IW_AUTH_CIPHER_NONE:
6382 return SEC_LEVEL_0;
6383 case IW_AUTH_CIPHER_WEP40:
6384 case IW_AUTH_CIPHER_WEP104:
6385 return SEC_LEVEL_1;
6386 case IW_AUTH_CIPHER_TKIP:
6387 return SEC_LEVEL_2;
6388 case IW_AUTH_CIPHER_CCMP:
6389 return SEC_LEVEL_3;
6390 default:
6391 return -1;
6392 }
6393 }
6394
6395 /* SIOCSIWAUTH */
6396 static int ipw_wx_set_auth(struct net_device *dev,
6397 struct iw_request_info *info,
6398 union iwreq_data *wrqu, char *extra)
6399 {
6400 struct ipw_priv *priv = ieee80211_priv(dev);
6401 struct ieee80211_device *ieee = priv->ieee;
6402 struct iw_param *param = &wrqu->param;
6403 struct ieee80211_crypt_data *crypt;
6404 unsigned long flags;
6405 int ret = 0;
6406
6407 switch (param->flags & IW_AUTH_INDEX) {
6408 case IW_AUTH_WPA_VERSION:
6409 break;
6410 case IW_AUTH_CIPHER_PAIRWISE:
6411 ipw_set_hw_decrypt_unicast(priv,
6412 wext_cipher2level(param->value));
6413 break;
6414 case IW_AUTH_CIPHER_GROUP:
6415 ipw_set_hw_decrypt_multicast(priv,
6416 wext_cipher2level(param->value));
6417 break;
6418 case IW_AUTH_KEY_MGMT:
6419 /*
6420 * ipw2200 does not use these parameters
6421 */
6422 break;
6423
6424 case IW_AUTH_TKIP_COUNTERMEASURES:
6425 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6426 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6427 break;
6428
6429 flags = crypt->ops->get_flags(crypt->priv);
6430
6431 if (param->value)
6432 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6433 else
6434 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6435
6436 crypt->ops->set_flags(flags, crypt->priv);
6437
6438 break;
6439
6440 case IW_AUTH_DROP_UNENCRYPTED:{
6441 /* HACK:
6442 *
6443 * wpa_supplicant calls set_wpa_enabled when the driver
6444 * is loaded and unloaded, regardless of whether WPA is being
6445 * used. No other calls are made that could be used to
6446 * determine whether encryption will be used before an
6447 * association is expected. If encryption is not being
6448 * used, drop_unencrypted is set to false, else true -- we
6449 * can use this to determine whether the CAP_PRIVACY_ON bit
6450 * should be set.
6451 */
6452 struct ieee80211_security sec = {
6453 .flags = SEC_ENABLED,
6454 .enabled = param->value,
6455 };
6456 priv->ieee->drop_unencrypted = param->value;
6457 /* We only change SEC_LEVEL for open mode. Others
6458 * are set by ipw_wpa_set_encryption.
6459 */
6460 if (!param->value) {
6461 sec.flags |= SEC_LEVEL;
6462 sec.level = SEC_LEVEL_0;
6463 } else {
6464 sec.flags |= SEC_LEVEL;
6465 sec.level = SEC_LEVEL_1;
6466 }
6467 if (priv->ieee->set_security)
6468 priv->ieee->set_security(priv->ieee->dev, &sec);
6469 break;
6470 }
6471
6472 case IW_AUTH_80211_AUTH_ALG:
6473 ret = ipw_wpa_set_auth_algs(priv, param->value);
6474 break;
6475
6476 case IW_AUTH_WPA_ENABLED:
6477 ret = ipw_wpa_enable(priv, param->value);
6478 break;
6479
6480 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6481 ieee->ieee802_1x = param->value;
6482 break;
6483
6484 //case IW_AUTH_ROAMING_CONTROL:
6485 case IW_AUTH_PRIVACY_INVOKED:
6486 ieee->privacy_invoked = param->value;
6487 break;
6488
6489 default:
6490 return -EOPNOTSUPP;
6491 }
6492 return ret;
6493 }
6494
6495 /* SIOCGIWAUTH */
6496 static int ipw_wx_get_auth(struct net_device *dev,
6497 struct iw_request_info *info,
6498 union iwreq_data *wrqu, char *extra)
6499 {
6500 struct ipw_priv *priv = ieee80211_priv(dev);
6501 struct ieee80211_device *ieee = priv->ieee;
6502 struct ieee80211_crypt_data *crypt;
6503 struct iw_param *param = &wrqu->param;
6504 int ret = 0;
6505
6506 switch (param->flags & IW_AUTH_INDEX) {
6507 case IW_AUTH_WPA_VERSION:
6508 case IW_AUTH_CIPHER_PAIRWISE:
6509 case IW_AUTH_CIPHER_GROUP:
6510 case IW_AUTH_KEY_MGMT:
6511 /*
6512 * wpa_supplicant will control these internally
6513 */
6514 ret = -EOPNOTSUPP;
6515 break;
6516
6517 case IW_AUTH_TKIP_COUNTERMEASURES:
6518 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6519 if (!crypt || !crypt->ops->get_flags)
6520 break;
6521
6522 param->value = (crypt->ops->get_flags(crypt->priv) &
6523 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6524
6525 break;
6526
6527 case IW_AUTH_DROP_UNENCRYPTED:
6528 param->value = ieee->drop_unencrypted;
6529 break;
6530
6531 case IW_AUTH_80211_AUTH_ALG:
6532 param->value = ieee->sec.auth_mode;
6533 break;
6534
6535 case IW_AUTH_WPA_ENABLED:
6536 param->value = ieee->wpa_enabled;
6537 break;
6538
6539 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6540 param->value = ieee->ieee802_1x;
6541 break;
6542
6543 case IW_AUTH_ROAMING_CONTROL:
6544 case IW_AUTH_PRIVACY_INVOKED:
6545 param->value = ieee->privacy_invoked;
6546 break;
6547
6548 default:
6549 return -EOPNOTSUPP;
6550 }
6551 return 0;
6552 }
6553
6554 /* SIOCSIWENCODEEXT */
6555 static int ipw_wx_set_encodeext(struct net_device *dev,
6556 struct iw_request_info *info,
6557 union iwreq_data *wrqu, char *extra)
6558 {
6559 struct ipw_priv *priv = ieee80211_priv(dev);
6560 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6561
6562 if (hwcrypto) {
6563 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6564 /* IPW HW can't build TKIP MIC,
6565 host decryption still needed */
6566 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6567 priv->ieee->host_mc_decrypt = 1;
6568 else {
6569 priv->ieee->host_encrypt = 0;
6570 priv->ieee->host_encrypt_msdu = 1;
6571 priv->ieee->host_decrypt = 1;
6572 }
6573 } else {
6574 priv->ieee->host_encrypt = 0;
6575 priv->ieee->host_encrypt_msdu = 0;
6576 priv->ieee->host_decrypt = 0;
6577 priv->ieee->host_mc_decrypt = 0;
6578 }
6579 }
6580
6581 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6582 }
6583
6584 /* SIOCGIWENCODEEXT */
6585 static int ipw_wx_get_encodeext(struct net_device *dev,
6586 struct iw_request_info *info,
6587 union iwreq_data *wrqu, char *extra)
6588 {
6589 struct ipw_priv *priv = ieee80211_priv(dev);
6590 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6591 }
6592
6593 /* SIOCSIWMLME */
6594 static int ipw_wx_set_mlme(struct net_device *dev,
6595 struct iw_request_info *info,
6596 union iwreq_data *wrqu, char *extra)
6597 {
6598 struct ipw_priv *priv = ieee80211_priv(dev);
6599 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6600 u16 reason;
6601
6602 reason = cpu_to_le16(mlme->reason_code);
6603
6604 switch (mlme->cmd) {
6605 case IW_MLME_DEAUTH:
6606 // silently ignore
6607 break;
6608
6609 case IW_MLME_DISASSOC:
6610 ipw_disassociate(priv);
6611 break;
6612
6613 default:
6614 return -EOPNOTSUPP;
6615 }
6616 return 0;
6617 }
6618
6619 #ifdef CONFIG_IPW_QOS
6620
6621 /* QoS */
6622 /*
6623 * Get the modulation type of the current network or
6624 * the card's current mode.
6625 */
6626 u8 ipw_qos_current_mode(struct ipw_priv * priv)
6627 {
6628 u8 mode = 0;
6629
6630 if (priv->status & STATUS_ASSOCIATED) {
6631 unsigned long flags;
6632
6633 spin_lock_irqsave(&priv->ieee->lock, flags);
6634 mode = priv->assoc_network->mode;
6635 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6636 } else {
6637 mode = priv->ieee->mode;
6638 }
6639 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6640 return mode;
6641 }
6642
6643 /*
6644 * Handle management frame beacons and probe responses
6645 */
6646 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6647 int active_network,
6648 struct ieee80211_network *network)
6649 {
6650 u32 size = sizeof(struct ieee80211_qos_parameters);
6651
6652 if (network->capability & WLAN_CAPABILITY_IBSS)
6653 network->qos_data.active = network->qos_data.supported;
6654
6655 if (network->flags & NETWORK_HAS_QOS_MASK) {
6656 if (active_network &&
6657 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6658 network->qos_data.active = network->qos_data.supported;
6659
6660 if ((network->qos_data.active == 1) && (active_network == 1) &&
6661 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6662 (network->qos_data.old_param_count !=
6663 network->qos_data.param_count)) {
6664 network->qos_data.old_param_count =
6665 network->qos_data.param_count;
6666 schedule_work(&priv->qos_activate);
6667 IPW_DEBUG_QOS("QoS parameters change call "
6668 "qos_activate\n");
6669 }
6670 } else {
6671 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6672 memcpy(&network->qos_data.parameters,
6673 &def_parameters_CCK, size);
6674 else
6675 memcpy(&network->qos_data.parameters,
6676 &def_parameters_OFDM, size);
6677
6678 if ((network->qos_data.active == 1) && (active_network == 1)) {
6679 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6680 schedule_work(&priv->qos_activate);
6681 }
6682
6683 network->qos_data.active = 0;
6684 network->qos_data.supported = 0;
6685 }
6686 if ((priv->status & STATUS_ASSOCIATED) &&
6687 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6688 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6689 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6690 !(network->flags & NETWORK_EMPTY_ESSID))
6691 if ((network->ssid_len ==
6692 priv->assoc_network->ssid_len) &&
6693 !memcmp(network->ssid,
6694 priv->assoc_network->ssid,
6695 network->ssid_len)) {
6696 queue_work(priv->workqueue,
6697 &priv->merge_networks);
6698 }
6699 }
6700
6701 return 0;
6702 }
6703
6704 /*
6705 * This function sets up the firmware to support QoS. It sends
6706 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO.
6707 */
6708 static int ipw_qos_activate(struct ipw_priv *priv,
6709 struct ieee80211_qos_data *qos_network_data)
6710 {
6711 int err;
6712 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6713 struct ieee80211_qos_parameters *active_one = NULL;
6714 u32 size = sizeof(struct ieee80211_qos_parameters);
6715 u32 burst_duration;
6716 int i;
6717 u8 type;
6718
6719 type = ipw_qos_current_mode(priv);
6720
6721 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6722 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6723 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6724 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6725
6726 if (qos_network_data == NULL) {
6727 if (type == IEEE_B) {
6728 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6729 active_one = &def_parameters_CCK;
6730 } else
6731 active_one = &def_parameters_OFDM;
6732
6733 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6734 burst_duration = ipw_qos_get_burst_duration(priv);
6735 for (i = 0; i < QOS_QUEUE_NUM; i++)
6736 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6737 (u16) burst_duration;
6738 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6739 if (type == IEEE_B) {
6740 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6741 type);
6742 if (priv->qos_data.qos_enable == 0)
6743 active_one = &def_parameters_CCK;
6744 else
6745 active_one = priv->qos_data.def_qos_parm_CCK;
6746 } else {
6747 if (priv->qos_data.qos_enable == 0)
6748 active_one = &def_parameters_OFDM;
6749 else
6750 active_one = priv->qos_data.def_qos_parm_OFDM;
6751 }
6752 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6753 } else {
6754 unsigned long flags;
6755 int active;
6756
6757 spin_lock_irqsave(&priv->ieee->lock, flags);
6758 active_one = &(qos_network_data->parameters);
6759 qos_network_data->old_param_count =
6760 qos_network_data->param_count;
6761 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6762 active = qos_network_data->supported;
6763 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6764
6765 if (active == 0) {
6766 burst_duration = ipw_qos_get_burst_duration(priv);
6767 for (i = 0; i < QOS_QUEUE_NUM; i++)
6768 qos_parameters[QOS_PARAM_SET_ACTIVE].
6769 tx_op_limit[i] = (u16) burst_duration;
6770 }
6771 }
6772
6773 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6774 err = ipw_send_qos_params_command(priv,
6775 (struct ieee80211_qos_parameters *)
6776 &(qos_parameters[0]));
6777 if (err)
6778 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6779
6780 return err;
6781 }
6782
6783 /*
6784 * Send IPW_CMD_WME_INFO to the firmware
6785 */
6786 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6787 {
6788 int ret = 0;
6789 struct ieee80211_qos_information_element qos_info;
6790
6791 if (priv == NULL)
6792 return -1;
6793
6794 qos_info.elementID = QOS_ELEMENT_ID;
6795 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6796
6797 qos_info.version = QOS_VERSION_1;
6798 qos_info.ac_info = 0;
6799
6800 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6801 qos_info.qui_type = QOS_OUI_TYPE;
6802 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6803
6804 ret = ipw_send_qos_info_command(priv, &qos_info);
6805 if (ret != 0) {
6806 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6807 }
6808 return ret;
6809 }
6810
6811 /*
6812 * Set the QoS parameters for the association request
6813 */
6814 static int ipw_qos_association(struct ipw_priv *priv,
6815 struct ieee80211_network *network)
6816 {
6817 int err = 0;
6818 struct ieee80211_qos_data *qos_data = NULL;
6819 struct ieee80211_qos_data ibss_data = {
6820 .supported = 1,
6821 .active = 1,
6822 };
6823
6824 switch (priv->ieee->iw_mode) {
6825 case IW_MODE_ADHOC:
6826 if (!(network->capability & WLAN_CAPABILITY_IBSS))
6827 BUG();
6828
6829 qos_data = &ibss_data;
6830 break;
6831
6832 case IW_MODE_INFRA:
6833 qos_data = &network->qos_data;
6834 break;
6835
6836 default:
6837 BUG();
6838 break;
6839 }
6840
6841 err = ipw_qos_activate(priv, qos_data);
6842 if (err) {
6843 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6844 return err;
6845 }
6846
6847 if (priv->qos_data.qos_enable && qos_data->supported) {
6848 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6849 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6850 return ipw_qos_set_info_element(priv);
6851 }
6852
6853 return 0;
6854 }
6855
6856 /*
6857 * Handle beacon responses. If the QoS setting of the network
6858 * differs from the associated setting, adjust the QoS
6859 * setting.
6860 */
6861 static int ipw_qos_association_resp(struct ipw_priv *priv,
6862 struct ieee80211_network *network)
6863 {
6864 int ret = 0;
6865 unsigned long flags;
6866 u32 size = sizeof(struct ieee80211_qos_parameters);
6867 int set_qos_param = 0;
6868
6869 if ((priv == NULL) || (network == NULL) ||
6870 (priv->assoc_network == NULL))
6871 return ret;
6872
6873 if (!(priv->status & STATUS_ASSOCIATED))
6874 return ret;
6875
6876 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6877 return ret;
6878
6879 spin_lock_irqsave(&priv->ieee->lock, flags);
6880 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6881 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6882 sizeof(struct ieee80211_qos_data));
6883 priv->assoc_network->qos_data.active = 1;
6884 if ((network->qos_data.old_param_count !=
6885 network->qos_data.param_count)) {
6886 set_qos_param = 1;
6887 network->qos_data.old_param_count =
6888 network->qos_data.param_count;
6889 }
6890
6891 } else {
6892 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6893 memcpy(&priv->assoc_network->qos_data.parameters,
6894 &def_parameters_CCK, size);
6895 else
6896 memcpy(&priv->assoc_network->qos_data.parameters,
6897 &def_parameters_OFDM, size);
6898 priv->assoc_network->qos_data.active = 0;
6899 priv->assoc_network->qos_data.supported = 0;
6900 set_qos_param = 1;
6901 }
6902
6903 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6904
6905 if (set_qos_param == 1)
6906 schedule_work(&priv->qos_activate);
6907
6908 return ret;
6909 }
6910
6911 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6912 {
6913 u32 ret = 0;
6914
6915 if ((priv == NULL))
6916 return 0;
6917
6918 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6919 ret = priv->qos_data.burst_duration_CCK;
6920 else
6921 ret = priv->qos_data.burst_duration_OFDM;
6922
6923 return ret;
6924 }
6925
6926 /*
6927 * Initialize the global QoS settings
6928 */
6929 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6930 int burst_enable, u32 burst_duration_CCK,
6931 u32 burst_duration_OFDM)
6932 {
6933 priv->qos_data.qos_enable = enable;
6934
6935 if (priv->qos_data.qos_enable) {
6936 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6937 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6938 IPW_DEBUG_QOS("QoS is enabled\n");
6939 } else {
6940 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6941 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6942 IPW_DEBUG_QOS("QoS is not enabled\n");
6943 }
6944
6945 priv->qos_data.burst_enable = burst_enable;
6946
6947 if (burst_enable) {
6948 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6949 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6950 } else {
6951 priv->qos_data.burst_duration_CCK = 0;
6952 priv->qos_data.burst_duration_OFDM = 0;
6953 }
6954 }
6955
6956 /*
6957 * Map the packet priority to the right TX queue
6958 */
6959 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6960 {
6961 if (priority > 7 || !priv->qos_data.qos_enable)
6962 priority = 0;
6963
6964 return from_priority_to_tx_queue[priority] - 1;
6965 }
6966
6967 /*
6968 * add QoS parameter to the TX command
6969 */
6970 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6971 u16 priority,
6972 struct tfd_data *tfd, u8 unicast)
6973 {
6974 int ret = 0;
6975 int tx_queue_id = 0;
6976 struct ieee80211_qos_data *qos_data = NULL;
6977 int active, supported;
6978 unsigned long flags;
6979
6980 if (!(priv->status & STATUS_ASSOCIATED))
6981 return 0;
6982
6983 qos_data = &priv->assoc_network->qos_data;
6984
6985 spin_lock_irqsave(&priv->ieee->lock, flags);
6986
6987 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6988 if (unicast == 0)
6989 qos_data->active = 0;
6990 else
6991 qos_data->active = qos_data->supported;
6992 }
6993
6994 active = qos_data->active;
6995 supported = qos_data->supported;
6996
6997 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6998
6999 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7000 "unicast %d\n",
7001 priv->qos_data.qos_enable, active, supported, unicast);
7002 if (active && priv->qos_data.qos_enable) {
7003 ret = from_priority_to_tx_queue[priority];
7004 tx_queue_id = ret - 1;
7005 IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
7006 if (priority <= 7) {
7007 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7008 tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
7009 tfd->tfd.tfd_26.mchdr.frame_ctl |=
7010 IEEE80211_STYPE_QOS_DATA;
7011
7012 if (priv->qos_data.qos_no_ack_mask &
7013 (1UL << tx_queue_id)) {
7014 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7015 tfd->tfd.tfd_26.mchdr.qos_ctrl |=
7016 CTRL_QOS_NO_ACK;
7017 }
7018 }
7019 }
7020
7021 return ret;
7022 }
7023
7024 /*
7025 * background support to run QoS activate functionality
7026 */
7027 static void ipw_bg_qos_activate(void *data)
7028 {
7029 struct ipw_priv *priv = data;
7030
7031 if (priv == NULL)
7032 return;
7033
7034 down(&priv->sem);
7035
7036 if (priv->status & STATUS_ASSOCIATED)
7037 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7038
7039 up(&priv->sem);
7040 }
7041
7042 static int ipw_handle_probe_response(struct net_device *dev,
7043 struct ieee80211_probe_response *resp,
7044 struct ieee80211_network *network)
7045 {
7046 struct ipw_priv *priv = ieee80211_priv(dev);
7047 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7048 (network == priv->assoc_network));
7049
7050 ipw_qos_handle_probe_response(priv, active_network, network);
7051
7052 return 0;
7053 }
7054
7055 static int ipw_handle_beacon(struct net_device *dev,
7056 struct ieee80211_beacon *resp,
7057 struct ieee80211_network *network)
7058 {
7059 struct ipw_priv *priv = ieee80211_priv(dev);
7060 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7061 (network == priv->assoc_network));
7062
7063 ipw_qos_handle_probe_response(priv, active_network, network);
7064
7065 return 0;
7066 }
7067
7068 static int ipw_handle_assoc_response(struct net_device *dev,
7069 struct ieee80211_assoc_response *resp,
7070 struct ieee80211_network *network)
7071 {
7072 struct ipw_priv *priv = ieee80211_priv(dev);
7073 ipw_qos_association_resp(priv, network);
7074 return 0;
7075 }
7076
7077 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7078 *qos_param)
7079 {
7080 struct host_cmd cmd = {
7081 .cmd = IPW_CMD_QOS_PARAMETERS,
7082 .len = (sizeof(struct ieee80211_qos_parameters) * 3)
7083 };
7084
7085 memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3);
7086 return ipw_send_cmd(priv, &cmd);
7087 }
7088
7089 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7090 *qos_param)
7091 {
7092 struct host_cmd cmd = {
7093 .cmd = IPW_CMD_WME_INFO,
7094 .len = sizeof(*qos_param)
7095 };
7096
7097 memcpy(cmd.param, qos_param, sizeof(*qos_param));
7098 return ipw_send_cmd(priv, &cmd);
7099 }
7100
7101 #endif /* CONFIG_IPW_QOS */
7102
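/* Build the association request for the selected network (channel, auth
 * type, preamble, rates, TSF, BSSID, ...) and send the SSID, supported
 * rates, system configuration, sensitivity and associate commands to the
 * firmware. */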
7103 static int ipw_associate_network(struct ipw_priv *priv,
7104 struct ieee80211_network *network,
7105 struct ipw_supported_rates *rates, int roaming)
7106 {
7107 int err;
7108
7109 if (priv->config & CFG_FIXED_RATE)
7110 ipw_set_fixed_rate(priv, network->mode);
7111
7112 if (!(priv->config & CFG_STATIC_ESSID)) {
7113 priv->essid_len = min(network->ssid_len,
7114 (u8) IW_ESSID_MAX_SIZE);
7115 memcpy(priv->essid, network->ssid, priv->essid_len);
7116 }
7117
7118 network->last_associate = jiffies;
7119
7120 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7121 priv->assoc_request.channel = network->channel;
7122 priv->assoc_request.auth_key = 0;
7123
7124 if ((priv->capability & CAP_PRIVACY_ON) &&
7125 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7126 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7127 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7128
7129 if ((priv->ieee->sec.level == SEC_LEVEL_1) &&
7130 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
7131 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7132
7133 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7134 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7135 priv->assoc_request.auth_type = AUTH_LEAP;
7136 else
7137 priv->assoc_request.auth_type = AUTH_OPEN;
7138
7139 if (priv->ieee->wpa_ie_len) {
7140 priv->assoc_request.policy_support = 0x02; /* RSN active */
7141 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7142 priv->ieee->wpa_ie_len);
7143 }
7144
7145 /*
7146 * It is valid for our ieee device to support multiple modes, but
7147 * when it comes to associating to a given network we have to choose
7148 * just one mode.
7149 */
7150 if (network->mode & priv->ieee->mode & IEEE_A)
7151 priv->assoc_request.ieee_mode = IPW_A_MODE;
7152 else if (network->mode & priv->ieee->mode & IEEE_G)
7153 priv->assoc_request.ieee_mode = IPW_G_MODE;
7154 else if (network->mode & priv->ieee->mode & IEEE_B)
7155 priv->assoc_request.ieee_mode = IPW_B_MODE;
7156
7157 priv->assoc_request.capability = network->capability;
7158 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7159 && !(priv->config & CFG_PREAMBLE_LONG)) {
7160 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7161 } else {
7162 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7163
7164 /* Clear the short preamble if we won't be supporting it */
7165 priv->assoc_request.capability &=
7166 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7167 }
7168
7169 /* Clear capability bits that aren't used in Ad Hoc */
7170 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7171 priv->assoc_request.capability &=
7172 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7173
7174 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7175 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7176 roaming ? "Rea" : "A",
7177 escape_essid(priv->essid, priv->essid_len),
7178 network->channel,
7179 ipw_modes[priv->assoc_request.ieee_mode],
7180 rates->num_rates,
7181 (priv->assoc_request.preamble_length ==
7182 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7183 network->capability &
7184 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7185 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7186 priv->capability & CAP_PRIVACY_ON ?
7187 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7188 "(open)") : "",
7189 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7190 priv->capability & CAP_PRIVACY_ON ?
7191 '1' + priv->ieee->sec.active_key : '.',
7192 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7193
7194 priv->assoc_request.beacon_interval = network->beacon_interval;
7195 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7196 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7197 priv->assoc_request.assoc_type = HC_IBSS_START;
7198 priv->assoc_request.assoc_tsf_msw = 0;
7199 priv->assoc_request.assoc_tsf_lsw = 0;
7200 } else {
7201 if (unlikely(roaming))
7202 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7203 else
7204 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7205 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7206 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7207 }
7208
7209 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7210
7211 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7212 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7213 priv->assoc_request.atim_window = network->atim_window;
7214 } else {
7215 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7216 priv->assoc_request.atim_window = 0;
7217 }
7218
7219 priv->assoc_request.listen_interval = network->listen_interval;
7220
7221 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7222 if (err) {
7223 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7224 return err;
7225 }
7226
7227 rates->ieee_mode = priv->assoc_request.ieee_mode;
7228 rates->purpose = IPW_RATE_CONNECT;
7229 ipw_send_supported_rates(priv, rates);
7230
7231 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7232 priv->sys_config.dot11g_auto_detection = 1;
7233 else
7234 priv->sys_config.dot11g_auto_detection = 0;
7235
7236 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7237 priv->sys_config.answer_broadcast_ssid_probe = 1;
7238 else
7239 priv->sys_config.answer_broadcast_ssid_probe = 0;
7240
7241 err = ipw_send_system_config(priv, &priv->sys_config);
7242 if (err) {
7243 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7244 return err;
7245 }
7246
7247 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7248 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7249 if (err) {
7250 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7251 return err;
7252 }
7253
7254 /*
7255 * If preemption is enabled, it is possible for the association
7256 * to complete before we return from ipw_send_associate. Therefore
7257 * we have to be sure to update our private data first.
7258 */
7259 priv->channel = network->channel;
7260 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7261 priv->status |= STATUS_ASSOCIATING;
7262 priv->status &= ~STATUS_SECURITY_UPDATED;
7263
7264 priv->assoc_network = network;
7265
7266 #ifdef CONFIG_IPW_QOS
7267 ipw_qos_association(priv, network);
7268 #endif
7269
7270 err = ipw_send_associate(priv, &priv->assoc_request);
7271 if (err) {
7272 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7273 return err;
7274 }
7275
7276 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7277 escape_essid(priv->essid, priv->essid_len),
7278 MAC_ARG(priv->bssid));
7279
7280 return 0;
7281 }
7282
7283 static void ipw_roam(void *data)
7284 {
7285 struct ipw_priv *priv = data;
7286 struct ieee80211_network *network = NULL;
7287 struct ipw_network_match match = {
7288 .network = priv->assoc_network
7289 };
7290
7291 /* The roaming process is as follows:
7292 *
7293 * 1. Missed beacon threshold triggers the roaming process by
7294 * setting the status ROAM bit and requesting a scan.
7295 * 2. When the scan completes, it schedules the ROAM work
7296 * 3. The ROAM work looks at all of the known networks for one that
7297 * is a better network than the currently associated. If none
7298 * found, the ROAM process is over (ROAM bit cleared)
7299 * 4. If a better network is found, a disassociation request is
7300 * sent.
7301 * 5. When the disassociation completes, the roam work is again
7302 * scheduled. The second time through, the driver is no longer
7303 * associated, and the newly selected network is sent an
7304 * association request.
7305 * 6. At this point, the roaming process is complete and the ROAM
7306 * status bit is cleared.
7307 */
7308
7309 /* If we are no longer associated, and the roaming bit is no longer
7310 * set, then we are not actively roaming, so just return */
7311 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7312 return;
7313
7314 if (priv->status & STATUS_ASSOCIATED) {
7315 /* First pass through ROAM process -- look for a better
7316 * network */
7317 unsigned long flags;
7318 u8 rssi = priv->assoc_network->stats.rssi;
7319 priv->assoc_network->stats.rssi = -128;
7320 spin_lock_irqsave(&priv->ieee->lock, flags);
7321 list_for_each_entry(network, &priv->ieee->network_list, list) {
7322 if (network != priv->assoc_network)
7323 ipw_best_network(priv, &match, network, 1);
7324 }
7325 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7326 priv->assoc_network->stats.rssi = rssi;
7327
7328 if (match.network == priv->assoc_network) {
7329 IPW_DEBUG_ASSOC("No better APs in this network to "
7330 "roam to.\n");
7331 priv->status &= ~STATUS_ROAMING;
7332 ipw_debug_config(priv);
7333 return;
7334 }
7335
7336 ipw_send_disassociate(priv, 1);
7337 priv->assoc_network = match.network;
7338
7339 return;
7340 }
7341
7342 /* Second pass through ROAM process -- request association */
7343 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7344 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7345 priv->status &= ~STATUS_ROAMING;
7346 }
7347
7348 static void ipw_bg_roam(void *data)
7349 {
7350 struct ipw_priv *priv = data;
7351 down(&priv->sem);
7352 ipw_roam(data);
7353 up(&priv->sem);
7354 }
7355
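/* Pick the best matching network from the scan results and associate to
 * it; in ad-hoc mode a new IBSS may be created if nothing suitable was
 * found, otherwise another scan is scheduled. */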
7356 static int ipw_associate(void *data)
7357 {
7358 struct ipw_priv *priv = data;
7359
7360 struct ieee80211_network *network = NULL;
7361 struct ipw_network_match match = {
7362 .network = NULL
7363 };
7364 struct ipw_supported_rates *rates;
7365 struct list_head *element;
7366 unsigned long flags;
7367
7368 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7369 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7370 return 0;
7371 }
7372
7373 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7374 IPW_DEBUG_ASSOC("Not attempting association (already in "
7375 "progress)\n");
7376 return 0;
7377 }
7378
7379 if (priv->status & STATUS_DISASSOCIATING) {
7380 IPW_DEBUG_ASSOC("Not attempting association (in "
7381 "disassociating)\n ");
7382 queue_work(priv->workqueue, &priv->associate);
7383 return 0;
7384 }
7385
7386 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7387 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7388 "initialized)\n");
7389 return 0;
7390 }
7391
7392 if (!(priv->config & CFG_ASSOCIATE) &&
7393 !(priv->config & (CFG_STATIC_ESSID |
7394 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7395 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7396 return 0;
7397 }
7398
7399 /* Protect our use of the network_list */
7400 spin_lock_irqsave(&priv->ieee->lock, flags);
7401 list_for_each_entry(network, &priv->ieee->network_list, list)
7402 ipw_best_network(priv, &match, network, 0);
7403
7404 network = match.network;
7405 rates = &match.rates;
7406
7407 if (network == NULL &&
7408 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7409 priv->config & CFG_ADHOC_CREATE &&
7410 priv->config & CFG_STATIC_ESSID &&
7411 priv->config & CFG_STATIC_CHANNEL &&
7412 !list_empty(&priv->ieee->network_free_list)) {
7413 element = priv->ieee->network_free_list.next;
7414 network = list_entry(element, struct ieee80211_network, list);
7415 ipw_adhoc_create(priv, network);
7416 rates = &priv->rates;
7417 list_del(element);
7418 list_add_tail(&network->list, &priv->ieee->network_list);
7419 }
7420 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7421
7422 /* If we reached the end of the list, then we don't have any valid
7423 * matching APs */
7424 if (!network) {
7425 ipw_debug_config(priv);
7426
7427 if (!(priv->status & STATUS_SCANNING)) {
7428 if (!(priv->config & CFG_SPEED_SCAN))
7429 queue_delayed_work(priv->workqueue,
7430 &priv->request_scan,
7431 SCAN_INTERVAL);
7432 else
7433 queue_work(priv->workqueue,
7434 &priv->request_scan);
7435 }
7436
7437 return 0;
7438 }
7439
7440 ipw_associate_network(priv, network, rates, 0);
7441
7442 return 1;
7443 }
7444
7445 static void ipw_bg_associate(void *data)
7446 {
7447 struct ipw_priv *priv = data;
7448 down(&priv->sem);
7449 ipw_associate(data);
7450 up(&priv->sem);
7451 }
7452
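/* For frames already decrypted by the hardware, clear the PROTECTED bit
 * and strip the CCMP header/MIC or WEP IV/ICV so the 802.11 stack does
 * not attempt to decrypt them again. */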
7453 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7454 struct sk_buff *skb)
7455 {
7456 struct ieee80211_hdr *hdr;
7457 u16 fc;
7458
7459 hdr = (struct ieee80211_hdr *)skb->data;
7460 fc = le16_to_cpu(hdr->frame_ctl);
7461 if (!(fc & IEEE80211_FCTL_PROTECTED))
7462 return;
7463
7464 fc &= ~IEEE80211_FCTL_PROTECTED;
7465 hdr->frame_ctl = cpu_to_le16(fc);
7466 switch (priv->ieee->sec.level) {
7467 case SEC_LEVEL_3:
7468 /* Remove CCMP HDR */
7469 memmove(skb->data + IEEE80211_3ADDR_LEN,
7470 skb->data + IEEE80211_3ADDR_LEN + 8,
7471 skb->len - IEEE80211_3ADDR_LEN - 8);
7472 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7473 break;
7474 case SEC_LEVEL_2:
7475 break;
7476 case SEC_LEVEL_1:
7477 /* Remove IV */
7478 memmove(skb->data + IEEE80211_3ADDR_LEN,
7479 skb->data + IEEE80211_3ADDR_LEN + 4,
7480 skb->len - IEEE80211_3ADDR_LEN - 4);
7481 skb_trim(skb, skb->len - 8); /* IV + ICV */
7482 break;
7483 case SEC_LEVEL_0:
7484 break;
7485 default:
7486 printk(KERN_ERR "Unknown security level %d\n",
7487 priv->ieee->sec.level);
7488 break;
7489 }
7490 }
7491
7492 static void ipw_handle_data_packet(struct ipw_priv *priv,
7493 struct ipw_rx_mem_buffer *rxb,
7494 struct ieee80211_rx_stats *stats)
7495 {
7496 struct ieee80211_hdr_4addr *hdr;
7497 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7498
7499 /* We received data from the HW, so stop the watchdog */
7500 priv->net_dev->trans_start = jiffies;
7501
7502 /* We only process data packets if the
7503 * interface is open */
7504 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7505 skb_tailroom(rxb->skb))) {
7506 priv->ieee->stats.rx_errors++;
7507 priv->wstats.discard.misc++;
7508 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7509 return;
7510 } else if (unlikely(!netif_running(priv->net_dev))) {
7511 priv->ieee->stats.rx_dropped++;
7512 priv->wstats.discard.misc++;
7513 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7514 return;
7515 }
7516
7517 /* Advance skb->data to the start of the actual payload */
7518 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7519
7520 /* Set the size of the skb to the size of the frame */
7521 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7522
7523 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7524
7525 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7526 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7527 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7528 (is_multicast_ether_addr(hdr->addr1) ?
7529 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7530 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7531
7532 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7533 priv->ieee->stats.rx_errors++;
7534 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7535 rxb->skb = NULL;
7536 __ipw_led_activity_on(priv);
7537 }
7538 }
7539
7540 #ifdef CONFIG_IEEE80211_RADIOTAP
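/* Monitor-mode receive path: prepend a minimal radiotap header (flags, rate,
 * channel, signal and antenna) to the raw frame before handing it to
 * ieee80211_rx(). */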
7541 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7542 struct ipw_rx_mem_buffer *rxb,
7543 struct ieee80211_rx_stats *stats)
7544 {
7545 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7546 struct ipw_rx_frame *frame = &pkt->u.frame;
7547
7548 /* initial pull of some data */
7549 u16 received_channel = frame->received_channel;
7550 u8 antennaAndPhy = frame->antennaAndPhy;
7551 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7552 u16 pktrate = frame->rate;
7553
7554 /* Magic struct that slots into the radiotap header -- no reason
7555 * to build this manually element by element, we can write it much
7556 * more efficiently than we can parse it. ORDER MATTERS HERE */
7557 struct ipw_rt_hdr {
7558 struct ieee80211_radiotap_header rt_hdr;
7559 u8 rt_flags; /* radiotap packet flags */
7560 u8 rt_rate; /* rate in 500kb/s */
7561 u16 rt_channel; /* channel in MHz */
7562 u16 rt_chbitmask; /* channel bitfield */
7563 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
7564 u8 rt_antenna; /* antenna number */
7565 } *ipw_rt;
7566
7567 short len = le16_to_cpu(pkt->u.frame.length);
7568
7569 /* We received data from the HW, so stop the watchdog */
7570 priv->net_dev->trans_start = jiffies;
7571
7572 /* We only process data packets if the
7573 * interface is open */
7574 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7575 skb_tailroom(rxb->skb))) {
7576 priv->ieee->stats.rx_errors++;
7577 priv->wstats.discard.misc++;
7578 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7579 return;
7580 } else if (unlikely(!netif_running(priv->net_dev))) {
7581 priv->ieee->stats.rx_dropped++;
7582 priv->wstats.discard.misc++;
7583 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7584 return;
7585 }
7586
7587 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7588 * that now */
7589 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7590 /* FIXME: Should alloc bigger skb instead */
7591 priv->ieee->stats.rx_dropped++;
7592 priv->wstats.discard.misc++;
7593 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7594 return;
7595 }
7596
7597 /* copy the frame itself */
7598 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7599 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7600
7601 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7602 * part of our real header, which saves a little time.
7603 *
7604 * No longer necessary since we fill in all our data. Purge before merging
7605 * patch officially.
7606 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7607 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7608 */
7609
7610 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7611
7612 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7613 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7614 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* length of the radiotap header */
7615
7616 /* Big bitfield of all the fields we provide in radiotap */
7617 ipw_rt->rt_hdr.it_present =
7618 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7619 (1 << IEEE80211_RADIOTAP_RATE) |
7620 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7621 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7622 (1 << IEEE80211_RADIOTAP_ANTENNA));
7623
7624 /* Zero the flags, we'll add to them as we go */
7625 ipw_rt->rt_flags = 0;
7626
7627 /* Convert signal to DBM */
7628 ipw_rt->rt_dbmsignal = antsignal;
7629
7630 /* Convert the channel data and set the flags */
7631 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7632 if (received_channel > 14) { /* 802.11a */
7633 ipw_rt->rt_chbitmask =
7634 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7635 } else if (antennaAndPhy & 32) { /* 802.11b */
7636 ipw_rt->rt_chbitmask =
7637 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7638 } else { /* 802.11g */
7639 ipw_rt->rt_chbitmask =
7640 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7641 }
7642
7643 /* set the rate in multiples of 500 kb/s */
7644 switch (pktrate) {
7645 case IPW_TX_RATE_1MB:
7646 ipw_rt->rt_rate = 2;
7647 break;
7648 case IPW_TX_RATE_2MB:
7649 ipw_rt->rt_rate = 4;
7650 break;
7651 case IPW_TX_RATE_5MB:
7652 ipw_rt->rt_rate = 10;
7653 break;
7654 case IPW_TX_RATE_6MB:
7655 ipw_rt->rt_rate = 12;
7656 break;
7657 case IPW_TX_RATE_9MB:
7658 ipw_rt->rt_rate = 18;
7659 break;
7660 case IPW_TX_RATE_11MB:
7661 ipw_rt->rt_rate = 22;
7662 break;
7663 case IPW_TX_RATE_12MB:
7664 ipw_rt->rt_rate = 24;
7665 break;
7666 case IPW_TX_RATE_18MB:
7667 ipw_rt->rt_rate = 36;
7668 break;
7669 case IPW_TX_RATE_24MB:
7670 ipw_rt->rt_rate = 48;
7671 break;
7672 case IPW_TX_RATE_36MB:
7673 ipw_rt->rt_rate = 72;
7674 break;
7675 case IPW_TX_RATE_48MB:
7676 ipw_rt->rt_rate = 96;
7677 break;
7678 case IPW_TX_RATE_54MB:
7679 ipw_rt->rt_rate = 108;
7680 break;
7681 default:
7682 ipw_rt->rt_rate = 0;
7683 break;
7684 }
7685
7686 /* antenna number */
7687 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7688
7689 /* set the preamble flag if we have it */
7690 if ((antennaAndPhy & 64))
7691 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7692
7693 /* Set the size of the skb to the size of the frame */
7694 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7695
7696 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7697
7698 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7699 priv->ieee->stats.rx_errors++;
7700 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7701 rxb->skb = NULL;
7702 /* no LED during capture */
7703 }
7704 }
7705 #endif
7706
7707 static int is_network_packet(struct ipw_priv *priv,
7708 struct ieee80211_hdr_4addr *header)
7709 {
7710 /* Filter incoming packets to determine if they are targeted toward
7711 * this network, discarding packets coming from ourselves */
7712 switch (priv->ieee->iw_mode) {
7713 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7714 /* packets from our adapter are dropped (echo) */
7715 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7716 return 0;
7717
7718 /* {broad,multi}cast packets to our BSSID go through */
7719 if (is_multicast_ether_addr(header->addr1))
7720 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7721
7722 /* packets to our adapter go through */
7723 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7724 ETH_ALEN);
7725
7726 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7727 /* packets from our adapter are dropped (echo) */
7728 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7729 return 0;
7730
7731 /* {broad,multi}cast packets to our BSS go through */
7732 if (is_multicast_ether_addr(header->addr1))
7733 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7734
7735 /* packets to our adapter go through */
7736 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7737 ETH_ALEN);
7738 }
7739
7740 return 1;
7741 }
7742
7743 #define IPW_PACKET_RETRY_TIME HZ
7744
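/* Detect retransmitted frames by comparing the sequence and fragment numbers
 * against the last frame seen (a per-peer hash in IBSS mode, a single entry
 * in infrastructure mode); returns 1 when the frame is a duplicate to drop. */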
7745 static int is_duplicate_packet(struct ipw_priv *priv,
7746 struct ieee80211_hdr_4addr *header)
7747 {
7748 u16 sc = le16_to_cpu(header->seq_ctl);
7749 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7750 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7751 u16 *last_seq, *last_frag;
7752 unsigned long *last_time;
7753
7754 switch (priv->ieee->iw_mode) {
7755 case IW_MODE_ADHOC:
7756 {
7757 struct list_head *p;
7758 struct ipw_ibss_seq *entry = NULL;
7759 u8 *mac = header->addr2;
7760 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
7761
7762 __list_for_each(p, &priv->ibss_mac_hash[index]) {
7763 entry =
7764 list_entry(p, struct ipw_ibss_seq, list);
7765 if (!memcmp(entry->mac, mac, ETH_ALEN))
7766 break;
7767 }
7768 if (p == &priv->ibss_mac_hash[index]) {
7769 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
7770 if (!entry) {
7771 IPW_ERROR
7772 ("Cannot malloc new mac entry\n");
7773 return 0;
7774 }
7775 memcpy(entry->mac, mac, ETH_ALEN);
7776 entry->seq_num = seq;
7777 entry->frag_num = frag;
7778 entry->packet_time = jiffies;
7779 list_add(&entry->list,
7780 &priv->ibss_mac_hash[index]);
7781 return 0;
7782 }
7783 last_seq = &entry->seq_num;
7784 last_frag = &entry->frag_num;
7785 last_time = &entry->packet_time;
7786 break;
7787 }
7788 case IW_MODE_INFRA:
7789 last_seq = &priv->last_seq_num;
7790 last_frag = &priv->last_frag_num;
7791 last_time = &priv->last_packet_time;
7792 break;
7793 default:
7794 return 0;
7795 }
7796 if ((*last_seq == seq) &&
7797 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
7798 if (*last_frag == frag)
7799 goto drop;
7800 if (*last_frag + 1 != frag)
7801 /* out-of-order fragment */
7802 goto drop;
7803 } else
7804 *last_seq = seq;
7805
7806 *last_frag = frag;
7807 *last_time = jiffies;
7808 return 0;
7809
7810 drop:
7811 /* The BUG_ON below is commented out because we have observed the card
7812 * receiving duplicate packets without the FCTL_RETRY bit set in IBSS
7813 * mode with fragmentation enabled.
7814 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
7815 return 1;
7816 }
7817
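/* Forward management frames to ieee80211_rx_mgt(), learn IBSS peers from
 * beacons and probe responses, and deliver a stats-tagged copy of the frame
 * to user space when CFG_NET_STATS is set. */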
7818 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
7819 struct ipw_rx_mem_buffer *rxb,
7820 struct ieee80211_rx_stats *stats)
7821 {
7822 struct sk_buff *skb = rxb->skb;
7823 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
7824 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
7825 (skb->data + IPW_RX_FRAME_SIZE);
7826
7827 ieee80211_rx_mgt(priv->ieee, header, stats);
7828
7829 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
7830 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7831 IEEE80211_STYPE_PROBE_RESP) ||
7832 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7833 IEEE80211_STYPE_BEACON))) {
7834 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
7835 ipw_add_station(priv, header->addr2);
7836 }
7837
7838 if (priv->config & CFG_NET_STATS) {
7839 IPW_DEBUG_HC("sending stat packet\n");
7840
7841 /* Set the size of the skb to the size of the full
7842 * ipw header and 802.11 frame */
7843 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
7844 IPW_RX_FRAME_SIZE);
7845
7846 /* Advance past the ipw packet header to the 802.11 frame */
7847 skb_pull(skb, IPW_RX_FRAME_SIZE);
7848
7849 /* Push the ieee80211_rx_stats before the 802.11 frame */
7850 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
7851
7852 skb->dev = priv->ieee->dev;
7853
7854 /* Point raw at the ieee80211_stats */
7855 skb->mac.raw = skb->data;
7856
7857 skb->pkt_type = PACKET_OTHERHOST;
7858 skb->protocol = __constant_htons(ETH_P_80211_STATS);
7859 memset(skb->cb, 0, sizeof(rxb->skb->cb));
7860 netif_rx(skb);
7861 rxb->skb = NULL;
7862 }
7863 }
7864
7865 /*
7866 * Main entry function for receiving a packet with 802.11 headers. This
7867 * should be called whenever the FW has notified us that there is a new
7868 * skb in the receive queue.
7869 */
7870 static void ipw_rx(struct ipw_priv *priv)
7871 {
7872 struct ipw_rx_mem_buffer *rxb;
7873 struct ipw_rx_packet *pkt;
7874 struct ieee80211_hdr_4addr *header;
7875 u32 r, w, i;
7876 u8 network_packet;
7877
7878 r = ipw_read32(priv, IPW_RX_READ_INDEX);
7879 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
7880 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
7881
7882 while (i != r) {
7883 rxb = priv->rxq->queue[i];
7884 #ifdef CONFIG_IPW2200_DEBUG
7885 if (unlikely(rxb == NULL)) {
7886 printk(KERN_CRIT "Queue not allocated!\n");
7887 break;
7888 }
7889 #endif
7890 priv->rxq->queue[i] = NULL;
7891
7892 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
7893 IPW_RX_BUF_SIZE,
7894 PCI_DMA_FROMDEVICE);
7895
7896 pkt = (struct ipw_rx_packet *)rxb->skb->data;
7897 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
7898 pkt->header.message_type,
7899 pkt->header.rx_seq_num, pkt->header.control_bits);
7900
7901 switch (pkt->header.message_type) {
7902 case RX_FRAME_TYPE: /* 802.11 frame */ {
7903 struct ieee80211_rx_stats stats = {
7904 .rssi =
7905 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7906 IPW_RSSI_TO_DBM,
7907 .signal =
7908 le16_to_cpu(pkt->u.frame.signal),
7909 .noise =
7910 le16_to_cpu(pkt->u.frame.noise),
7911 .rate = pkt->u.frame.rate,
7912 .mac_time = jiffies,
7913 .received_channel =
7914 pkt->u.frame.received_channel,
7915 .freq =
7916 (pkt->u.frame.
7917 control & (1 << 0)) ?
7918 IEEE80211_24GHZ_BAND :
7919 IEEE80211_52GHZ_BAND,
7920 .len = le16_to_cpu(pkt->u.frame.length),
7921 };
7922
7923 if (stats.rssi != 0)
7924 stats.mask |= IEEE80211_STATMASK_RSSI;
7925 if (stats.signal != 0)
7926 stats.mask |= IEEE80211_STATMASK_SIGNAL;
7927 if (stats.noise != 0)
7928 stats.mask |= IEEE80211_STATMASK_NOISE;
7929 if (stats.rate != 0)
7930 stats.mask |= IEEE80211_STATMASK_RATE;
7931
7932 priv->rx_packets++;
7933
7934 #ifdef CONFIG_IPW2200_MONITOR
7935 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7936 #ifdef CONFIG_IEEE80211_RADIOTAP
7937 ipw_handle_data_packet_monitor(priv,
7938 rxb,
7939 &stats);
7940 #else
7941 ipw_handle_data_packet(priv, rxb,
7942 &stats);
7943 #endif
7944 break;
7945 }
7946 #endif
7947
7948 header =
7949 (struct ieee80211_hdr_4addr *)(rxb->skb->
7950 data +
7951 IPW_RX_FRAME_SIZE);
7952 /* TODO: Check Ad-Hoc dest/source and make sure
7953 * that we are actually parsing these packets
7954 * correctly -- we should probably use the
7955 * frame control of the packet and disregard
7956 * the current iw_mode */
7957
7958 network_packet =
7959 is_network_packet(priv, header);
7960 if (network_packet && priv->assoc_network) {
7961 priv->assoc_network->stats.rssi =
7962 stats.rssi;
7963 average_add(&priv->average_rssi,
7964 stats.rssi);
7965 priv->last_rx_rssi = stats.rssi;
7966 }
7967
7968 IPW_DEBUG_RX("Frame: len=%u\n",
7969 le16_to_cpu(pkt->u.frame.length));
7970
7971 if (le16_to_cpu(pkt->u.frame.length) <
7972 frame_hdr_len(header)) {
7973 IPW_DEBUG_DROP
7974 ("Received packet is too small. "
7975 "Dropping.\n");
7976 priv->ieee->stats.rx_errors++;
7977 priv->wstats.discard.misc++;
7978 break;
7979 }
7980
7981 switch (WLAN_FC_GET_TYPE
7982 (le16_to_cpu(header->frame_ctl))) {
7983
7984 case IEEE80211_FTYPE_MGMT:
7985 ipw_handle_mgmt_packet(priv, rxb,
7986 &stats);
7987 break;
7988
7989 case IEEE80211_FTYPE_CTL:
7990 break;
7991
7992 case IEEE80211_FTYPE_DATA:
7993 if (unlikely(!network_packet ||
7994 is_duplicate_packet(priv,
7995 header)))
7996 {
7997 IPW_DEBUG_DROP("Dropping: "
7998 MAC_FMT ", "
7999 MAC_FMT ", "
8000 MAC_FMT "\n",
8001 MAC_ARG(header->
8002 addr1),
8003 MAC_ARG(header->
8004 addr2),
8005 MAC_ARG(header->
8006 addr3));
8007 break;
8008 }
8009
8010 ipw_handle_data_packet(priv, rxb,
8011 &stats);
8012
8013 break;
8014 }
8015 break;
8016 }
8017
8018 case RX_HOST_NOTIFICATION_TYPE:{
8019 IPW_DEBUG_RX
8020 ("Notification: subtype=%02X flags=%02X size=%d\n",
8021 pkt->u.notification.subtype,
8022 pkt->u.notification.flags,
8023 pkt->u.notification.size);
8024 ipw_rx_notification(priv, &pkt->u.notification);
8025 break;
8026 }
8027
8028 default:
8029 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8030 pkt->header.message_type);
8031 break;
8032 }
8033
8034 /* For now we just don't re-use anything. We can tweak this
8035 * later to try and re-use notification packets and SKBs that
8036 * fail to Rx correctly */
8037 if (rxb->skb != NULL) {
8038 dev_kfree_skb_any(rxb->skb);
8039 rxb->skb = NULL;
8040 }
8041
8042 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8043 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8044 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8045
8046 i = (i + 1) % RX_QUEUE_SIZE;
8047 }
8048
8049 /* Backtrack one entry */
8050 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8051
8052 ipw_rx_queue_restock(priv);
8053 }
8054
8055 #define DEFAULT_RTS_THRESHOLD 2304U
8056 #define MIN_RTS_THRESHOLD 1U
8057 #define MAX_RTS_THRESHOLD 2304U
8058 #define DEFAULT_BEACON_INTERVAL 100U
8059 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8060 #define DEFAULT_LONG_RETRY_LIMIT 4U
8061
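/* Re-apply the module parameters and per-adapter defaults (band, modulation,
 * RTS/retry thresholds, power mode); returns non-zero when the wireless mode
 * is unchanged by the reset. */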
8062 static int ipw_sw_reset(struct ipw_priv *priv, int init)
8063 {
8064 int band, modulation;
8065 int old_mode = priv->ieee->iw_mode;
8066
8067 /* Initialize module parameter values here */
8068 priv->config = 0;
8069
8070 /* We default to disabling the LED code as right now it causes
8071 * too many systems to lock up... */
8072 if (!led)
8073 priv->config |= CFG_NO_LED;
8074
8075 if (associate)
8076 priv->config |= CFG_ASSOCIATE;
8077 else
8078 IPW_DEBUG_INFO("Auto associate disabled.\n");
8079
8080 if (auto_create)
8081 priv->config |= CFG_ADHOC_CREATE;
8082 else
8083 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8084
8085 priv->config &= ~CFG_STATIC_ESSID;
8086 priv->essid_len = 0;
8087 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8088
8089 if (disable) {
8090 priv->status |= STATUS_RF_KILL_SW;
8091 IPW_DEBUG_INFO("Radio disabled.\n");
8092 }
8093
8094 if (channel != 0) {
8095 priv->config |= CFG_STATIC_CHANNEL;
8096 priv->channel = channel;
8097 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8098 /* TODO: Validate that provided channel is in range */
8099 }
8100 #ifdef CONFIG_IPW_QOS
8101 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8102 burst_duration_CCK, burst_duration_OFDM);
8103 #endif /* CONFIG_IPW_QOS */
8104
8105 switch (mode) {
8106 case 1:
8107 priv->ieee->iw_mode = IW_MODE_ADHOC;
8108 priv->net_dev->type = ARPHRD_ETHER;
8109
8110 break;
8111 #ifdef CONFIG_IPW2200_MONITOR
8112 case 2:
8113 priv->ieee->iw_mode = IW_MODE_MONITOR;
8114 #ifdef CONFIG_IEEE80211_RADIOTAP
8115 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8116 #else
8117 priv->net_dev->type = ARPHRD_IEEE80211;
8118 #endif
8119 break;
8120 #endif
8121 default:
8122 case 0:
8123 priv->net_dev->type = ARPHRD_ETHER;
8124 priv->ieee->iw_mode = IW_MODE_INFRA;
8125 break;
8126 }
8127
8128 if (hwcrypto) {
8129 priv->ieee->host_encrypt = 0;
8130 priv->ieee->host_encrypt_msdu = 0;
8131 priv->ieee->host_decrypt = 0;
8132 priv->ieee->host_mc_decrypt = 0;
8133 }
8134 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8135
8136 /* The IPW2200/2915 is able to do hardware fragmentation. */
8137 priv->ieee->host_open_frag = 0;
8138
8139 if ((priv->pci_dev->device == 0x4223) ||
8140 (priv->pci_dev->device == 0x4224)) {
8141 if (init)
8142 printk(KERN_INFO DRV_NAME
8143 ": Detected Intel PRO/Wireless 2915ABG Network "
8144 "Connection\n");
8145 priv->ieee->abg_true = 1;
8146 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8147 modulation = IEEE80211_OFDM_MODULATION |
8148 IEEE80211_CCK_MODULATION;
8149 priv->adapter = IPW_2915ABG;
8150 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8151 } else {
8152 if (init)
8153 printk(KERN_INFO DRV_NAME
8154 ": Detected Intel PRO/Wireless 2200BG Network "
8155 "Connection\n");
8156
8157 priv->ieee->abg_true = 0;
8158 band = IEEE80211_24GHZ_BAND;
8159 modulation = IEEE80211_OFDM_MODULATION |
8160 IEEE80211_CCK_MODULATION;
8161 priv->adapter = IPW_2200BG;
8162 priv->ieee->mode = IEEE_G | IEEE_B;
8163 }
8164
8165 priv->ieee->freq_band = band;
8166 priv->ieee->modulation = modulation;
8167
8168 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8169
8170 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8171 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8172
8173 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8174 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8175 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8176
8177 /* If power management is turned on, default to AC mode */
8178 priv->power_mode = IPW_POWER_AC;
8179 priv->tx_power = IPW_TX_POWER_DEFAULT;
8180
8181 return old_mode == priv->ieee->iw_mode;
8182 }
8183
8184 /*
8185 * This file defines the Wireless Extension handlers. It does not
8186 * define any methods of hardware manipulation and relies on the
8187 * functions defined in ipw_main to provide the HW interaction.
8188 *
8189 * The exception to this is the use of the ipw_get_ordinal()
8190 * function used to poll the hardware vs. making unnecessary calls.
8191 *
8192 */
8193
8194 static int ipw_wx_get_name(struct net_device *dev,
8195 struct iw_request_info *info,
8196 union iwreq_data *wrqu, char *extra)
8197 {
8198 struct ipw_priv *priv = ieee80211_priv(dev);
8199 down(&priv->sem);
8200 if (priv->status & STATUS_RF_KILL_MASK)
8201 strcpy(wrqu->name, "radio off");
8202 else if (!(priv->status & STATUS_ASSOCIATED))
8203 strcpy(wrqu->name, "unassociated");
8204 else
8205 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8206 ipw_modes[priv->assoc_request.ieee_mode]);
8207 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8208 up(&priv->sem);
8209 return 0;
8210 }
8211
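/* Apply a new channel setting (0 selects ANY): in monitor mode only the
 * current scan is aborted, otherwise a [re]association is triggered. */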
8212 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8213 {
8214 if (channel == 0) {
8215 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8216 priv->config &= ~CFG_STATIC_CHANNEL;
8217 IPW_DEBUG_ASSOC("Attempting to associate with new "
8218 "parameters.\n");
8219 ipw_associate(priv);
8220 return 0;
8221 }
8222
8223 priv->config |= CFG_STATIC_CHANNEL;
8224
8225 if (priv->channel == channel) {
8226 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8227 channel);
8228 return 0;
8229 }
8230
8231 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8232 priv->channel = channel;
8233
8234 #ifdef CONFIG_IPW2200_MONITOR
8235 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8236 int i;
8237 if (priv->status & STATUS_SCANNING) {
8238 IPW_DEBUG_SCAN("Scan abort triggered due to "
8239 "channel change.\n");
8240 ipw_abort_scan(priv);
8241 }
8242
8243 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8244 udelay(10);
8245
8246 if (priv->status & STATUS_SCANNING)
8247 IPW_DEBUG_SCAN("Still scanning...\n");
8248 else
8249 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8250 1000 - i);
8251
8252 return 0;
8253 }
8254 #endif /* CONFIG_IPW2200_MONITOR */
8255
8256 /* Network configuration changed -- force [re]association */
8257 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8258 if (!ipw_disassociate(priv))
8259 ipw_associate(priv);
8260
8261 return 0;
8262 }
8263
8264 static int ipw_wx_set_freq(struct net_device *dev,
8265 struct iw_request_info *info,
8266 union iwreq_data *wrqu, char *extra)
8267 {
8268 struct ipw_priv *priv = ieee80211_priv(dev);
8269 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8270 struct iw_freq *fwrq = &wrqu->freq;
8271 int ret = 0, i;
8272 u8 channel, flags;
8273 int band;
8274
8275 if (fwrq->m == 0) {
8276 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8277 down(&priv->sem);
8278 ret = ipw_set_channel(priv, 0);
8279 up(&priv->sem);
8280 return ret;
8281 }
8282 /* if setting by frequency, convert to channel */
8283 if (fwrq->e == 1) {
8284 channel = ipw_freq_to_channel(priv->ieee, fwrq->m);
8285 if (channel == 0)
8286 return -EINVAL;
8287 } else
8288 channel = fwrq->m;
8289
8290 if (!(band = ipw_is_valid_channel(priv->ieee, channel)))
8291 return -EINVAL;
8292
8293 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8294 i = ipw_channel_to_index(priv->ieee, channel);
8295 if (i == -1)
8296 return -EINVAL;
8297
8298 flags = (band == IEEE80211_24GHZ_BAND) ?
8299 geo->bg[i].flags : geo->a[i].flags;
8300 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8301 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8302 return -EINVAL;
8303 }
8304 }
8305
8306 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8307 down(&priv->sem);
8308 ret = ipw_set_channel(priv, channel);
8309 up(&priv->sem);
8310 return ret;
8311 }
8312
8313 static int ipw_wx_get_freq(struct net_device *dev,
8314 struct iw_request_info *info,
8315 union iwreq_data *wrqu, char *extra)
8316 {
8317 struct ipw_priv *priv = ieee80211_priv(dev);
8318
8319 wrqu->freq.e = 0;
8320
8321 /* If we are associated, trying to associate, or have a statically
8322 * configured CHANNEL then return that; otherwise return ANY */
8323 down(&priv->sem);
8324 if (priv->config & CFG_STATIC_CHANNEL ||
8325 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8326 wrqu->freq.m = priv->channel;
8327 else
8328 wrqu->freq.m = 0;
8329
8330 up(&priv->sem);
8331 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8332 return 0;
8333 }
8334
8335 static int ipw_wx_set_mode(struct net_device *dev,
8336 struct iw_request_info *info,
8337 union iwreq_data *wrqu, char *extra)
8338 {
8339 struct ipw_priv *priv = ieee80211_priv(dev);
8340 int err = 0;
8341
8342 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8343
8344 switch (wrqu->mode) {
8345 #ifdef CONFIG_IPW2200_MONITOR
8346 case IW_MODE_MONITOR:
8347 #endif
8348 case IW_MODE_ADHOC:
8349 case IW_MODE_INFRA:
8350 break;
8351 case IW_MODE_AUTO:
8352 wrqu->mode = IW_MODE_INFRA;
8353 break;
8354 default:
8355 return -EINVAL;
8356 }
8357 if (wrqu->mode == priv->ieee->iw_mode)
8358 return 0;
8359
8360 down(&priv->sem);
8361
8362 ipw_sw_reset(priv, 0);
8363
8364 #ifdef CONFIG_IPW2200_MONITOR
8365 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8366 priv->net_dev->type = ARPHRD_ETHER;
8367
8368 if (wrqu->mode == IW_MODE_MONITOR)
8369 #ifdef CONFIG_IEEE80211_RADIOTAP
8370 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8371 #else
8372 priv->net_dev->type = ARPHRD_IEEE80211;
8373 #endif
8374 #endif /* CONFIG_IPW2200_MONITOR */
8375
8376 /* Free the existing firmware and reset the fw_loaded
8377 * flag so ipw_load() will bring in the new firmware */
8378 free_firmware();
8379
8380 priv->ieee->iw_mode = wrqu->mode;
8381
8382 queue_work(priv->workqueue, &priv->adapter_restart);
8383 up(&priv->sem);
8384 return err;
8385 }
8386
8387 static int ipw_wx_get_mode(struct net_device *dev,
8388 struct iw_request_info *info,
8389 union iwreq_data *wrqu, char *extra)
8390 {
8391 struct ipw_priv *priv = ieee80211_priv(dev);
8392 down(&priv->sem);
8393 wrqu->mode = priv->ieee->iw_mode;
8394 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8395 up(&priv->sem);
8396 return 0;
8397 }
8398
8399 /* Values are in microseconds */
8400 static const s32 timeout_duration[] = {
8401 350000,
8402 250000,
8403 75000,
8404 37000,
8405 25000,
8406 };
8407
8408 static const s32 period_duration[] = {
8409 400000,
8410 700000,
8411 1000000,
8412 1000000,
8413 1000000
8414 };
8415
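/* SIOCGIWRANGE handler: report the supported bitrates, encoding sizes,
 * Wireless Extension version and the channel/frequency list for the
 * current geography. */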
8416 static int ipw_wx_get_range(struct net_device *dev,
8417 struct iw_request_info *info,
8418 union iwreq_data *wrqu, char *extra)
8419 {
8420 struct ipw_priv *priv = ieee80211_priv(dev);
8421 struct iw_range *range = (struct iw_range *)extra;
8422 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8423 int i = 0, j;
8424
8425 wrqu->data.length = sizeof(*range);
8426 memset(range, 0, sizeof(*range));
8427
8428 /* 54 Mb/s == ~27 Mb/s real (802.11g) */
8429 range->throughput = 27 * 1000 * 1000;
8430
8431 range->max_qual.qual = 100;
8432 /* TODO: Find real max RSSI and stick here */
8433 range->max_qual.level = 0;
8434 range->max_qual.noise = priv->ieee->worst_rssi + 0x100;
8435 range->max_qual.updated = 7; /* Updated all three */
8436
8437 range->avg_qual.qual = 70;
8438 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8439 range->avg_qual.level = 0; /* FIXME to real average level */
8440 range->avg_qual.noise = 0;
8441 range->avg_qual.updated = 7; /* Updated all three */
8442 down(&priv->sem);
8443 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8444
8445 for (i = 0; i < range->num_bitrates; i++)
8446 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8447 500000;
8448
8449 range->max_rts = DEFAULT_RTS_THRESHOLD;
8450 range->min_frag = MIN_FRAG_THRESHOLD;
8451 range->max_frag = MAX_FRAG_THRESHOLD;
8452
8453 range->encoding_size[0] = 5;
8454 range->encoding_size[1] = 13;
8455 range->num_encoding_sizes = 2;
8456 range->max_encoding_tokens = WEP_KEYS;
8457
8458 /* Set the Wireless Extension versions */
8459 range->we_version_compiled = WIRELESS_EXT;
8460 range->we_version_source = 16;
8461
8462 i = 0;
8463 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8464 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES;
8465 i++, j++) {
8466 range->freq[i].i = geo->bg[j].channel;
8467 range->freq[i].m = geo->bg[j].freq * 100000;
8468 range->freq[i].e = 1;
8469 }
8470 }
8471
8472 if (priv->ieee->mode & IEEE_A) {
8473 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES;
8474 i++, j++) {
8475 range->freq[i].i = geo->a[j].channel;
8476 range->freq[i].m = geo->a[j].freq * 100000;
8477 range->freq[i].e = 1;
8478 }
8479 }
8480
8481 range->num_channels = i;
8482 range->num_frequency = i;
8483
8484 up(&priv->sem);
8485
8486 /* Event capability (kernel + driver) */
8487 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8488 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8489 IW_EVENT_CAPA_MASK(SIOCGIWAP));
8490 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8491
8492 IPW_DEBUG_WX("GET Range\n");
8493 return 0;
8494 }
8495
8496 static int ipw_wx_set_wap(struct net_device *dev,
8497 struct iw_request_info *info,
8498 union iwreq_data *wrqu, char *extra)
8499 {
8500 struct ipw_priv *priv = ieee80211_priv(dev);
8501
8502 static const unsigned char any[] = {
8503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8504 };
8505 static const unsigned char off[] = {
8506 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8507 };
8508
8509 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8510 return -EINVAL;
8511 down(&priv->sem);
8512 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8513 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8514 /* we disable mandatory BSSID association */
8515 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8516 priv->config &= ~CFG_STATIC_BSSID;
8517 IPW_DEBUG_ASSOC("Attempting to associate with new "
8518 "parameters.\n");
8519 ipw_associate(priv);
8520 up(&priv->sem);
8521 return 0;
8522 }
8523
8524 priv->config |= CFG_STATIC_BSSID;
8525 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8526 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8527 up(&priv->sem);
8528 return 0;
8529 }
8530
8531 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8532 MAC_ARG(wrqu->ap_addr.sa_data));
8533
8534 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8535
8536 /* Network configuration changed -- force [re]association */
8537 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8538 if (!ipw_disassociate(priv))
8539 ipw_associate(priv);
8540
8541 up(&priv->sem);
8542 return 0;
8543 }
8544
8545 static int ipw_wx_get_wap(struct net_device *dev,
8546 struct iw_request_info *info,
8547 union iwreq_data *wrqu, char *extra)
8548 {
8549 struct ipw_priv *priv = ieee80211_priv(dev);
8550 /* If we are associated, trying to associate, or have a statically
8551 * configured BSSID then return that; otherwise return ANY */
8552 down(&priv->sem);
8553 if (priv->config & CFG_STATIC_BSSID ||
8554 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8555 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8556 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8557 } else
8558 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8559
8560 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8561 MAC_ARG(wrqu->ap_addr.sa_data));
8562 up(&priv->sem);
8563 return 0;
8564 }
8565
8566 static int ipw_wx_set_essid(struct net_device *dev,
8567 struct iw_request_info *info,
8568 union iwreq_data *wrqu, char *extra)
8569 {
8570 struct ipw_priv *priv = ieee80211_priv(dev);
8571 char *essid = ""; /* ANY */
8572 int length = 0;
8573 down(&priv->sem);
8574 if (wrqu->essid.flags && wrqu->essid.length) {
8575 length = wrqu->essid.length - 1;
8576 essid = extra;
8577 }
8578 if (length == 0) {
8579 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8580 if ((priv->config & CFG_STATIC_ESSID) &&
8581 !(priv->status & (STATUS_ASSOCIATED |
8582 STATUS_ASSOCIATING))) {
8583 IPW_DEBUG_ASSOC("Attempting to associate with new "
8584 "parameters.\n");
8585 priv->config &= ~CFG_STATIC_ESSID;
8586 ipw_associate(priv);
8587 }
8588 up(&priv->sem);
8589 return 0;
8590 }
8591
8592 length = min(length, IW_ESSID_MAX_SIZE);
8593
8594 priv->config |= CFG_STATIC_ESSID;
8595
8596 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8597 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8598 up(&priv->sem);
8599 return 0;
8600 }
8601
8602 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8603 length);
8604
8605 priv->essid_len = length;
8606 memcpy(priv->essid, essid, priv->essid_len);
8607
8608 /* Network configuration changed -- force [re]association */
8609 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8610 if (!ipw_disassociate(priv))
8611 ipw_associate(priv);
8612
8613 up(&priv->sem);
8614 return 0;
8615 }
8616
8617 static int ipw_wx_get_essid(struct net_device *dev,
8618 struct iw_request_info *info,
8619 union iwreq_data *wrqu, char *extra)
8620 {
8621 struct ipw_priv *priv = ieee80211_priv(dev);
8622
8623 /* If we are associated, trying to associate, or have a statically
8624 * configured ESSID then return that; otherwise return ANY */
8625 down(&priv->sem);
8626 if (priv->config & CFG_STATIC_ESSID ||
8627 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8628 IPW_DEBUG_WX("Getting essid: '%s'\n",
8629 escape_essid(priv->essid, priv->essid_len));
8630 memcpy(extra, priv->essid, priv->essid_len);
8631 wrqu->essid.length = priv->essid_len;
8632 wrqu->essid.flags = 1; /* active */
8633 } else {
8634 IPW_DEBUG_WX("Getting essid: ANY\n");
8635 wrqu->essid.length = 0;
8636 wrqu->essid.flags = 0; /* active */
8637 }
8638 up(&priv->sem);
8639 return 0;
8640 }
8641
8642 static int ipw_wx_set_nick(struct net_device *dev,
8643 struct iw_request_info *info,
8644 union iwreq_data *wrqu, char *extra)
8645 {
8646 struct ipw_priv *priv = ieee80211_priv(dev);
8647
8648 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8649 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8650 return -E2BIG;
8651 down(&priv->sem);
8652 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8653 memset(priv->nick, 0, sizeof(priv->nick));
8654 memcpy(priv->nick, extra, wrqu->data.length);
8655 IPW_DEBUG_TRACE("<<\n");
8656 up(&priv->sem);
8657 return 0;
8658
8659 }
8660
8661 static int ipw_wx_get_nick(struct net_device *dev,
8662 struct iw_request_info *info,
8663 union iwreq_data *wrqu, char *extra)
8664 {
8665 struct ipw_priv *priv = ieee80211_priv(dev);
8666 IPW_DEBUG_WX("Getting nick\n");
8667 down(&priv->sem);
8668 wrqu->data.length = strlen(priv->nick) + 1;
8669 memcpy(extra, priv->nick, wrqu->data.length);
8670 wrqu->data.flags = 1; /* active */
8671 up(&priv->sem);
8672 return 0;
8673 }
8674
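/* SIOCSIWRATE handler: translate the requested bitrate into a rate mask
 * (a single rate when 'fixed', all rates up to the value otherwise) and
 * force a [re]association if the mask changed. */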
8675 static int ipw_wx_set_rate(struct net_device *dev,
8676 struct iw_request_info *info,
8677 union iwreq_data *wrqu, char *extra)
8678 {
8679 /* TODO: We should use semaphores or locks for access to priv */
8680 struct ipw_priv *priv = ieee80211_priv(dev);
8681 u32 target_rate = wrqu->bitrate.value;
8682 u32 fixed, mask;
8683
8684 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8685 /* value = X, fixed = 1 means only rate X */
8686 /* value = X, fixed = 0 means all rates lower equal X */
8687
8688 if (target_rate == -1) {
8689 fixed = 0;
8690 mask = IEEE80211_DEFAULT_RATES_MASK;
8691 /* Now we should reassociate */
8692 goto apply;
8693 }
8694
8695 mask = 0;
8696 fixed = wrqu->bitrate.fixed;
8697
8698 if (target_rate == 1000000 || !fixed)
8699 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8700 if (target_rate == 1000000)
8701 goto apply;
8702
8703 if (target_rate == 2000000 || !fixed)
8704 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8705 if (target_rate == 2000000)
8706 goto apply;
8707
8708 if (target_rate == 5500000 || !fixed)
8709 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8710 if (target_rate == 5500000)
8711 goto apply;
8712
8713 if (target_rate == 6000000 || !fixed)
8714 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8715 if (target_rate == 6000000)
8716 goto apply;
8717
8718 if (target_rate == 9000000 || !fixed)
8719 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8720 if (target_rate == 9000000)
8721 goto apply;
8722
8723 if (target_rate == 11000000 || !fixed)
8724 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8725 if (target_rate == 11000000)
8726 goto apply;
8727
8728 if (target_rate == 12000000 || !fixed)
8729 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8730 if (target_rate == 12000000)
8731 goto apply;
8732
8733 if (target_rate == 18000000 || !fixed)
8734 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8735 if (target_rate == 18000000)
8736 goto apply;
8737
8738 if (target_rate == 24000000 || !fixed)
8739 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8740 if (target_rate == 24000000)
8741 goto apply;
8742
8743 if (target_rate == 36000000 || !fixed)
8744 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8745 if (target_rate == 36000000)
8746 goto apply;
8747
8748 if (target_rate == 48000000 || !fixed)
8749 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8750 if (target_rate == 48000000)
8751 goto apply;
8752
8753 if (target_rate == 54000000 || !fixed)
8754 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8755 if (target_rate == 54000000)
8756 goto apply;
8757
8758 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8759 return -EINVAL;
8760
8761 apply:
8762 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8763 mask, fixed ? "fixed" : "sub-rates");
8764 down(&priv->sem);
8765 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8766 priv->config &= ~CFG_FIXED_RATE;
8767 ipw_set_fixed_rate(priv, priv->ieee->mode);
8768 } else
8769 priv->config |= CFG_FIXED_RATE;
8770
8771 if (priv->rates_mask == mask) {
8772 IPW_DEBUG_WX("Mask set to current mask.\n");
8773 up(&priv->sem);
8774 return 0;
8775 }
8776
8777 priv->rates_mask = mask;
8778
8779 /* Network configuration changed -- force [re]association */
8780 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8781 if (!ipw_disassociate(priv))
8782 ipw_associate(priv);
8783
8784 up(&priv->sem);
8785 return 0;
8786 }
8787
8788 static int ipw_wx_get_rate(struct net_device *dev,
8789 struct iw_request_info *info,
8790 union iwreq_data *wrqu, char *extra)
8791 {
8792 struct ipw_priv *priv = ieee80211_priv(dev);
8793 down(&priv->sem);
8794 wrqu->bitrate.value = priv->last_rate;
8795 up(&priv->sem);
8796 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8797 return 0;
8798 }
8799
8800 static int ipw_wx_set_rts(struct net_device *dev,
8801 struct iw_request_info *info,
8802 union iwreq_data *wrqu, char *extra)
8803 {
8804 struct ipw_priv *priv = ieee80211_priv(dev);
8805 down(&priv->sem);
8806 if (wrqu->rts.disabled)
8807 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8808 else {
8809 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8810 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8811 up(&priv->sem);
8812 return -EINVAL;
8813 }
8814 priv->rts_threshold = wrqu->rts.value;
8815 }
8816
8817 ipw_send_rts_threshold(priv, priv->rts_threshold);
8818 up(&priv->sem);
8819 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8820 return 0;
8821 }
8822
8823 static int ipw_wx_get_rts(struct net_device *dev,
8824 struct iw_request_info *info,
8825 union iwreq_data *wrqu, char *extra)
8826 {
8827 struct ipw_priv *priv = ieee80211_priv(dev);
8828 down(&priv->sem);
8829 wrqu->rts.value = priv->rts_threshold;
8830 wrqu->rts.fixed = 0; /* no auto select */
8831 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8832 up(&priv->sem);
8833 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8834 return 0;
8835 }
8836
8837 static int ipw_wx_set_txpow(struct net_device *dev,
8838 struct iw_request_info *info,
8839 union iwreq_data *wrqu, char *extra)
8840 {
8841 struct ipw_priv *priv = ieee80211_priv(dev);
8842 int err = 0;
8843
8844 down(&priv->sem);
8845 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8846 err = -EINPROGRESS;
8847 goto out;
8848 }
8849
8850 if (!wrqu->power.fixed)
8851 wrqu->power.value = IPW_TX_POWER_DEFAULT;
8852
8853 if (wrqu->power.flags != IW_TXPOW_DBM) {
8854 err = -EINVAL;
8855 goto out;
8856 }
8857
8858 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
8859 (wrqu->power.value < IPW_TX_POWER_MIN)) {
8860 err = -EINVAL;
8861 goto out;
8862 }
8863
8864 priv->tx_power = wrqu->power.value;
8865 err = ipw_set_tx_power(priv);
8866 out:
8867 up(&priv->sem);
8868 return err;
8869 }
8870
8871 static int ipw_wx_get_txpow(struct net_device *dev,
8872 struct iw_request_info *info,
8873 union iwreq_data *wrqu, char *extra)
8874 {
8875 struct ipw_priv *priv = ieee80211_priv(dev);
8876 down(&priv->sem);
8877 wrqu->power.value = priv->tx_power;
8878 wrqu->power.fixed = 1;
8879 wrqu->power.flags = IW_TXPOW_DBM;
8880 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8881 up(&priv->sem);
8882
8883 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8884 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8885
8886 return 0;
8887 }
8888
8889 static int ipw_wx_set_frag(struct net_device *dev,
8890 struct iw_request_info *info,
8891 union iwreq_data *wrqu, char *extra)
8892 {
8893 struct ipw_priv *priv = ieee80211_priv(dev);
8894 down(&priv->sem);
8895 if (wrqu->frag.disabled)
8896 priv->ieee->fts = DEFAULT_FTS;
8897 else {
8898 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8899 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8900 up(&priv->sem);
8901 return -EINVAL;
8902 }
8903
8904 priv->ieee->fts = wrqu->frag.value & ~0x1;
8905 }
8906
8907 ipw_send_frag_threshold(priv, wrqu->frag.value);
8908 up(&priv->sem);
8909 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8910 return 0;
8911 }
8912
8913 static int ipw_wx_get_frag(struct net_device *dev,
8914 struct iw_request_info *info,
8915 union iwreq_data *wrqu, char *extra)
8916 {
8917 struct ipw_priv *priv = ieee80211_priv(dev);
8918 down(&priv->sem);
8919 wrqu->frag.value = priv->ieee->fts;
8920 wrqu->frag.fixed = 0; /* no auto select */
8921 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8922 up(&priv->sem);
8923 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8924
8925 return 0;
8926 }
8927
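/* SIOCSIWRETRY handler: only retry limits in the range 0-255 are supported;
 * lifetime requests are rejected, and IW_RETRY_MIN/IW_RETRY_MAX select the
 * short and long retry limits respectively. */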
8928 static int ipw_wx_set_retry(struct net_device *dev,
8929 struct iw_request_info *info,
8930 union iwreq_data *wrqu, char *extra)
8931 {
8932 struct ipw_priv *priv = ieee80211_priv(dev);
8933
8934 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
8935 return -EINVAL;
8936
8937 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
8938 return 0;
8939
8940 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8941 return -EINVAL;
8942
8943 down(&priv->sem);
8944 if (wrqu->retry.flags & IW_RETRY_MIN)
8945 priv->short_retry_limit = (u8) wrqu->retry.value;
8946 else if (wrqu->retry.flags & IW_RETRY_MAX)
8947 priv->long_retry_limit = (u8) wrqu->retry.value;
8948 else {
8949 priv->short_retry_limit = (u8) wrqu->retry.value;
8950 priv->long_retry_limit = (u8) wrqu->retry.value;
8951 }
8952
8953 ipw_send_retry_limit(priv, priv->short_retry_limit,
8954 priv->long_retry_limit);
8955 up(&priv->sem);
8956 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8957 priv->short_retry_limit, priv->long_retry_limit);
8958 return 0;
8959 }
8960
8961 static int ipw_wx_get_retry(struct net_device *dev,
8962 struct iw_request_info *info,
8963 union iwreq_data *wrqu, char *extra)
8964 {
8965 struct ipw_priv *priv = ieee80211_priv(dev);
8966
8967 down(&priv->sem);
8968 wrqu->retry.disabled = 0;
8969
8970 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8971 up(&priv->sem);
8972 return -EINVAL;
8973 }
8974
8975 if (wrqu->retry.flags & IW_RETRY_MAX) {
8976 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8977 wrqu->retry.value = priv->long_retry_limit;
8978 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8979 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8980 wrqu->retry.value = priv->short_retry_limit;
8981 } else {
8982 wrqu->retry.flags = IW_RETRY_LIMIT;
8983 wrqu->retry.value = priv->short_retry_limit;
8984 }
8985 up(&priv->sem);
8986
8987 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8988
8989 return 0;
8990 }
8991
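/* Build and send a directed (ESSID-specific) scan request; defers with
 * STATUS_SCAN_PENDING while RF kill is active and returns -EAGAIN rather
 * than sleeping if a scan is already in progress. */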
8992 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8993 int essid_len)
8994 {
8995 struct ipw_scan_request_ext scan;
8996 int err = 0, scan_type;
8997
8998 if (!(priv->status & STATUS_INIT) ||
8999 (priv->status & STATUS_EXIT_PENDING))
9000 return 0;
9001
9002 down(&priv->sem);
9003
9004 if (priv->status & STATUS_RF_KILL_MASK) {
9005 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9006 priv->status |= STATUS_SCAN_PENDING;
9007 goto done;
9008 }
9009
9010 IPW_DEBUG_HC("starting request direct scan!\n");
9011
9012 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9013 /* We should not sleep here; otherwise we will block most
9014 * of the system (for instance, we hold rtnl_lock when we
9015 * get here).
9016 */
9017 err = -EAGAIN;
9018 goto done;
9019 }
9020 memset(&scan, 0, sizeof(scan));
9021
9022 if (priv->config & CFG_SPEED_SCAN)
9023 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9024 cpu_to_le16(30);
9025 else
9026 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9027 cpu_to_le16(20);
9028
9029 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9030 cpu_to_le16(20);
9031 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9032 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9033
9034 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9035
9036 err = ipw_send_ssid(priv, essid, essid_len);
9037 if (err) {
9038 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9039 goto done;
9040 }
9041 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9042
9043 ipw_add_scan_channels(priv, &scan, scan_type);
9044
9045 err = ipw_send_scan_request_ext(priv, &scan);
9046 if (err) {
9047 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9048 goto done;
9049 }
9050
9051 priv->status |= STATUS_SCANNING;
9052
9053 done:
9054 up(&priv->sem);
9055 return err;
9056 }
9057
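/* SIOCSIWSCAN handler: honor IW_SCAN_THIS_ESSID with a directed scan,
 * otherwise queue the regular broadcast scan work item. */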
9058 static int ipw_wx_set_scan(struct net_device *dev,
9059 struct iw_request_info *info,
9060 union iwreq_data *wrqu, char *extra)
9061 {
9062 struct ipw_priv *priv = ieee80211_priv(dev);
9063 struct iw_scan_req *req = NULL;
9064 if (wrqu->data.length
9065 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9066 req = (struct iw_scan_req *)extra;
9067 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9068 ipw_request_direct_scan(priv, req->essid,
9069 req->essid_len);
9070 return 0;
9071 }
9072 }
9073
9074 IPW_DEBUG_WX("Start scan\n");
9075
9076 queue_work(priv->workqueue, &priv->request_scan);
9077
9078 return 0;
9079 }
9080
9081 static int ipw_wx_get_scan(struct net_device *dev,
9082 struct iw_request_info *info,
9083 union iwreq_data *wrqu, char *extra)
9084 {
9085 struct ipw_priv *priv = ieee80211_priv(dev);
9086 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9087 }
9088
9089 static int ipw_wx_set_encode(struct net_device *dev,
9090 struct iw_request_info *info,
9091 union iwreq_data *wrqu, char *key)
9092 {
9093 struct ipw_priv *priv = ieee80211_priv(dev);
9094 int ret;
9095 u32 cap = priv->capability;
9096
9097 down(&priv->sem);
9098 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9099
9100 /* In IBSS mode, we need to notify the firmware to update
9101 * the beacon info after we changed the capability. */
9102 if (cap != priv->capability &&
9103 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9104 priv->status & STATUS_ASSOCIATED)
9105 ipw_disassociate(priv);
9106
9107 up(&priv->sem);
9108 return ret;
9109 }
9110
9111 static int ipw_wx_get_encode(struct net_device *dev,
9112 struct iw_request_info *info,
9113 union iwreq_data *wrqu, char *key)
9114 {
9115 struct ipw_priv *priv = ieee80211_priv(dev);
9116 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9117 }
9118
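/* SIOCSIWPOWER handler: disabling power management selects the CAM firmware
 * mode (no power saving); enabling it without a previously chosen level
 * defaults to battery mode. */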
9119 static int ipw_wx_set_power(struct net_device *dev,
9120 struct iw_request_info *info,
9121 union iwreq_data *wrqu, char *extra)
9122 {
9123 struct ipw_priv *priv = ieee80211_priv(dev);
9124 int err;
9125 down(&priv->sem);
9126 if (wrqu->power.disabled) {
9127 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9128 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9129 if (err) {
9130 IPW_DEBUG_WX("failed setting power mode.\n");
9131 up(&priv->sem);
9132 return err;
9133 }
9134 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9135 up(&priv->sem);
9136 return 0;
9137 }
9138
9139 switch (wrqu->power.flags & IW_POWER_MODE) {
9140 case IW_POWER_ON: /* If not specified */
9141 case IW_POWER_MODE: /* If set all mask */
9142 case IW_POWER_ALL_R: /* If explicitly set to all */
9143 break;
9144 default: /* Otherwise we don't support it */
9145 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9146 wrqu->power.flags);
9147 up(&priv->sem);
9148 return -EOPNOTSUPP;
9149 }
9150
9151 /* If the user hasn't specified a power management mode yet, default
9152 * to BATTERY */
9153 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9154 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9155 else
9156 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9157 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9158 if (err) {
9159 IPW_DEBUG_WX("failed setting power mode.\n");
9160 up(&priv->sem);
9161 return err;
9162 }
9163
9164 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9165 up(&priv->sem);
9166 return 0;
9167 }
9168
9169 static int ipw_wx_get_power(struct net_device *dev,
9170 struct iw_request_info *info,
9171 union iwreq_data *wrqu, char *extra)
9172 {
9173 struct ipw_priv *priv = ieee80211_priv(dev);
9174 down(&priv->sem);
9175 if (!(priv->power_mode & IPW_POWER_ENABLED))
9176 wrqu->power.disabled = 1;
9177 else
9178 wrqu->power.disabled = 0;
9179
9180 up(&priv->sem);
9181 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9182
9183 return 0;
9184 }
9185
9186 static int ipw_wx_set_powermode(struct net_device *dev,
9187 struct iw_request_info *info,
9188 union iwreq_data *wrqu, char *extra)
9189 {
9190 struct ipw_priv *priv = ieee80211_priv(dev);
9191 int mode = *(int *)extra;
9192 int err;
9193 down(&priv->sem);
9194 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9195 mode = IPW_POWER_AC;
9196 priv->power_mode = mode;
9197 } else {
9198 priv->power_mode = IPW_POWER_ENABLED | mode;
9199 }
9200
9201 if (priv->power_mode != mode) {
9202 err = ipw_send_power_mode(priv, mode);
9203
9204 if (err) {
9205 IPW_DEBUG_WX("failed setting power mode.\n");
9206 up(&priv->sem);
9207 return err;
9208 }
9209 }
9210 up(&priv->sem);
9211 return 0;
9212 }
9213
9214 #define MAX_WX_STRING 80
9215 static int ipw_wx_get_powermode(struct net_device *dev,
9216 struct iw_request_info *info,
9217 union iwreq_data *wrqu, char *extra)
9218 {
9219 struct ipw_priv *priv = ieee80211_priv(dev);
9220 int level = IPW_POWER_LEVEL(priv->power_mode);
9221 char *p = extra;
9222
9223 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9224
9225 switch (level) {
9226 case IPW_POWER_AC:
9227 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9228 break;
9229 case IPW_POWER_BATTERY:
9230 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9231 break;
9232 default:
9233 p += snprintf(p, MAX_WX_STRING - (p - extra),
9234 "(Timeout %dms, Period %dms)",
9235 timeout_duration[level - 1] / 1000,
9236 period_duration[level - 1] / 1000);
9237 }
9238
9239 if (!(priv->power_mode & IPW_POWER_ENABLED))
9240 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9241
9242 wrqu->data.length = p - extra + 1;
9243
9244 return 0;
9245 }
9246
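/* Private ioctl: select the 802.11 band/modulation combination from an a/b/g
 * bitmask; 802.11a is rejected on 2200BG hardware and any change forces a
 * [re]association. */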
9247 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9248 struct iw_request_info *info,
9249 union iwreq_data *wrqu, char *extra)
9250 {
9251 struct ipw_priv *priv = ieee80211_priv(dev);
9252 int mode = *(int *)extra;
9253 u8 band = 0, modulation = 0;
9254
9255 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9256 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9257 return -EINVAL;
9258 }
9259 down(&priv->sem);
9260 if (priv->adapter == IPW_2915ABG) {
9261 priv->ieee->abg_true = 1;
9262 if (mode & IEEE_A) {
9263 band |= IEEE80211_52GHZ_BAND;
9264 modulation |= IEEE80211_OFDM_MODULATION;
9265 } else
9266 priv->ieee->abg_true = 0;
9267 } else {
9268 if (mode & IEEE_A) {
9269 IPW_WARNING("Attempt to set 2200BG into "
9270 "802.11a mode\n");
9271 up(&priv->sem);
9272 return -EINVAL;
9273 }
9274
9275 priv->ieee->abg_true = 0;
9276 }
9277
9278 if (mode & IEEE_B) {
9279 band |= IEEE80211_24GHZ_BAND;
9280 modulation |= IEEE80211_CCK_MODULATION;
9281 } else
9282 priv->ieee->abg_true = 0;
9283
9284 if (mode & IEEE_G) {
9285 band |= IEEE80211_24GHZ_BAND;
9286 modulation |= IEEE80211_OFDM_MODULATION;
9287 } else
9288 priv->ieee->abg_true = 0;
9289
9290 priv->ieee->mode = mode;
9291 priv->ieee->freq_band = band;
9292 priv->ieee->modulation = modulation;
9293 init_supported_rates(priv, &priv->rates);
9294
9295 /* Network configuration changed -- force [re]association */
9296 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9297 if (!ipw_disassociate(priv)) {
9298 ipw_send_supported_rates(priv, &priv->rates);
9299 ipw_associate(priv);
9300 }
9301
9302 /* Update the band LEDs */
9303 ipw_led_band_on(priv);
9304
9305 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9306 mode & IEEE_A ? 'a' : '.',
9307 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9308 up(&priv->sem);
9309 return 0;
9310 }
9311
9312 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9313 struct iw_request_info *info,
9314 union iwreq_data *wrqu, char *extra)
9315 {
9316 struct ipw_priv *priv = ieee80211_priv(dev);
9317 down(&priv->sem);
9318 switch (priv->ieee->mode) {
9319 case IEEE_A:
9320 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9321 break;
9322 case IEEE_B:
9323 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9324 break;
9325 case IEEE_A | IEEE_B:
9326 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9327 break;
9328 case IEEE_G:
9329 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9330 break;
9331 case IEEE_A | IEEE_G:
9332 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9333 break;
9334 case IEEE_B | IEEE_G:
9335 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9336 break;
9337 case IEEE_A | IEEE_B | IEEE_G:
9338 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9339 break;
9340 default:
9341 strncpy(extra, "unknown", MAX_WX_STRING);
9342 break;
9343 }
9344
9345 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9346
9347 wrqu->data.length = strlen(extra) + 1;
9348 up(&priv->sem);
9349
9350 return 0;
9351 }
9352
9353 static int ipw_wx_set_preamble(struct net_device *dev,
9354 struct iw_request_info *info,
9355 union iwreq_data *wrqu, char *extra)
9356 {
9357 struct ipw_priv *priv = ieee80211_priv(dev);
9358 int mode = *(int *)extra;
9359 down(&priv->sem);
9360 /* Switching from SHORT -> LONG requires a disassociation */
9361 if (mode == 1) {
9362 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9363 priv->config |= CFG_PREAMBLE_LONG;
9364
9365 /* Network configuration changed -- force [re]association */
9366 IPW_DEBUG_ASSOC
9367 ("[re]association triggered due to preamble change.\n");
9368 if (!ipw_disassociate(priv))
9369 ipw_associate(priv);
9370 }
9371 goto done;
9372 }
9373
9374 if (mode == 0) {
9375 priv->config &= ~CFG_PREAMBLE_LONG;
9376 goto done;
9377 }
9378 up(&priv->sem);
9379 return -EINVAL;
9380
9381 done:
9382 up(&priv->sem);
9383 return 0;
9384 }
9385
9386 static int ipw_wx_get_preamble(struct net_device *dev,
9387 struct iw_request_info *info,
9388 union iwreq_data *wrqu, char *extra)
9389 {
9390 struct ipw_priv *priv = ieee80211_priv(dev);
9391 down(&priv->sem);
9392 if (priv->config & CFG_PREAMBLE_LONG)
9393 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9394 else
9395 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9396 up(&priv->sem);
9397 return 0;
9398 }
9399
9400 #ifdef CONFIG_IPW2200_MONITOR
9401 static int ipw_wx_set_monitor(struct net_device *dev,
9402 struct iw_request_info *info,
9403 union iwreq_data *wrqu, char *extra)
9404 {
9405 struct ipw_priv *priv = ieee80211_priv(dev);
9406 int *parms = (int *)extra;
9407 int enable = (parms[0] > 0);
9408 down(&priv->sem);
9409 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9410 if (enable) {
9411 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9412 #ifdef CONFIG_IEEE80211_RADIOTAP
9413 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9414 #else
9415 priv->net_dev->type = ARPHRD_IEEE80211;
9416 #endif
9417 queue_work(priv->workqueue, &priv->adapter_restart);
9418 }
9419
9420 ipw_set_channel(priv, parms[1]);
9421 } else {
9422 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9423 up(&priv->sem);
9424 return 0;
9425 }
9426 priv->net_dev->type = ARPHRD_ETHER;
9427 queue_work(priv->workqueue, &priv->adapter_restart);
9428 }
9429 up(&priv->sem);
9430 return 0;
9431 }
9432
9433 #endif /* CONFIG_IPW2200_MONITOR */
9434
9435 static int ipw_wx_reset(struct net_device *dev,
9436 struct iw_request_info *info,
9437 union iwreq_data *wrqu, char *extra)
9438 {
9439 struct ipw_priv *priv = ieee80211_priv(dev);
9440 IPW_DEBUG_WX("RESET\n");
9441 queue_work(priv->workqueue, &priv->adapter_restart);
9442 return 0;
9443 }
9444
9445 static int ipw_wx_sw_reset(struct net_device *dev,
9446 struct iw_request_info *info,
9447 union iwreq_data *wrqu, char *extra)
9448 {
9449 struct ipw_priv *priv = ieee80211_priv(dev);
9450 union iwreq_data wrqu_sec = {
9451 .encoding = {
9452 .flags = IW_ENCODE_DISABLED,
9453 },
9454 };
9455 int ret;
9456
9457 IPW_DEBUG_WX("SW_RESET\n");
9458
9459 down(&priv->sem);
9460
9461 ret = ipw_sw_reset(priv, 0);
9462 if (!ret) {
9463 free_firmware();
9464 ipw_adapter_restart(priv);
9465 }
9466
9467 /* The SW reset bit might have been toggled on by the 'disable'
9468 * module parameter, so take appropriate action */
9469 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9470
9471 up(&priv->sem);
9472 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9473 down(&priv->sem);
9474
9475 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9476 /* Configuration likely changed -- force [re]association */
9477 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9478 "reset.\n");
9479 if (!ipw_disassociate(priv))
9480 ipw_associate(priv);
9481 }
9482
9483 up(&priv->sem);
9484
9485 return 0;
9486 }
9487
9488 /* Rebase the WE IOCTLs to zero for the handler array */
9489 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
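/* IW_IOCTL() turns each entry below into a C99 designated initializer, so
 * IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name expands to
 * [SIOCGIWNAME - SIOCSIWCOMMIT] = ipw_wx_get_name.  SIOCSIWCOMMIT is the
 * first standard WE ioctl, so every handler lands at its rebased index and
 * any ioctl not listed here is simply left NULL (unsupported). */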
9490 static iw_handler ipw_wx_handlers[] = {
9491 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9492 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9493 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9494 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9495 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9496 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9497 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9498 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9499 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9500 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9501 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9502 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9503 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9504 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9505 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9506 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9507 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9508 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9509 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9510 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9511 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9512 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9513 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9514 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9515 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9516 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9517 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9518 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9519 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9520 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9521 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9522 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9523 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9524 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9525 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9526 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9527 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9528 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9529 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9530 };
9531
9532 enum {
9533 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9534 IPW_PRIV_GET_POWER,
9535 IPW_PRIV_SET_MODE,
9536 IPW_PRIV_GET_MODE,
9537 IPW_PRIV_SET_PREAMBLE,
9538 IPW_PRIV_GET_PREAMBLE,
9539 IPW_PRIV_RESET,
9540 IPW_PRIV_SW_RESET,
9541 #ifdef CONFIG_IPW2200_MONITOR
9542 IPW_PRIV_SET_MONITOR,
9543 #endif
9544 };
9545
9546 static struct iw_priv_args ipw_priv_args[] = {
9547 {
9548 .cmd = IPW_PRIV_SET_POWER,
9549 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9550 .name = "set_power"},
9551 {
9552 .cmd = IPW_PRIV_GET_POWER,
9553 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9554 .name = "get_power"},
9555 {
9556 .cmd = IPW_PRIV_SET_MODE,
9557 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9558 .name = "set_mode"},
9559 {
9560 .cmd = IPW_PRIV_GET_MODE,
9561 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9562 .name = "get_mode"},
9563 {
9564 .cmd = IPW_PRIV_SET_PREAMBLE,
9565 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9566 .name = "set_preamble"},
9567 {
9568 .cmd = IPW_PRIV_GET_PREAMBLE,
9569 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9570 .name = "get_preamble"},
9571 {
9572 IPW_PRIV_RESET,
9573 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9574 {
9575 IPW_PRIV_SW_RESET,
9576 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9577 #ifdef CONFIG_IPW2200_MONITOR
9578 {
9579 IPW_PRIV_SET_MONITOR,
9580 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9581 #endif /* CONFIG_IPW2200_MONITOR */
9582 };
9583
9584 static iw_handler ipw_priv_handler[] = {
9585 ipw_wx_set_powermode,
9586 ipw_wx_get_powermode,
9587 ipw_wx_set_wireless_mode,
9588 ipw_wx_get_wireless_mode,
9589 ipw_wx_set_preamble,
9590 ipw_wx_get_preamble,
9591 ipw_wx_reset,
9592 ipw_wx_sw_reset,
9593 #ifdef CONFIG_IPW2200_MONITOR
9594 ipw_wx_set_monitor,
9595 #endif
9596 };
9597
9598 static struct iw_handler_def ipw_wx_handler_def = {
9599 .standard = ipw_wx_handlers,
9600 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9601 .num_private = ARRAY_SIZE(ipw_priv_handler),
9602 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9603 .private = ipw_priv_handler,
9604 .private_args = ipw_priv_args,
9605 .get_wireless_stats = ipw_get_wireless_stats,
9606 };
9607
9608 /*
9609 * Get wireless statistics.
9610 * Called by /proc/net/wireless
9611 * Also called by SIOCGIWSTATS
9612 */
9613 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9614 {
9615 struct ipw_priv *priv = ieee80211_priv(dev);
9616 struct iw_statistics *wstats;
9617
9618 wstats = &priv->wstats;
9619
9620 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9621 * netdev->get_wireless_stats seems to be called before fw is
9622 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9623          * and associated; if not associated, the values are all meaningless
9624 * anyway, so set them all to NULL and INVALID */
9625 if (!(priv->status & STATUS_ASSOCIATED)) {
9626 wstats->miss.beacon = 0;
9627 wstats->discard.retries = 0;
9628 wstats->qual.qual = 0;
9629 wstats->qual.level = 0;
9630 wstats->qual.noise = 0;
9631 wstats->qual.updated = 7;
9632 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9633 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9634 return wstats;
9635 }
9636
9637 wstats->qual.qual = priv->quality;
9638 wstats->qual.level = average_value(&priv->average_rssi);
9639 wstats->qual.noise = average_value(&priv->average_noise);
9640 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9641 IW_QUAL_NOISE_UPDATED;
9642
9643 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9644 wstats->discard.retries = priv->last_tx_failures;
9645 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9646
9647 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9648 goto fail_get_ordinal;
9649 wstats->discard.retries += tx_retry; */
9650
9651 return wstats;
9652 }
9653
9654 /* net device stuff */
9655
9656 static void init_sys_config(struct ipw_sys_config *sys_config)
9657 {
9658 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9659 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
9660 sys_config->answer_broadcast_ssid_probe = 0;
9661 sys_config->accept_all_data_frames = 0;
9662 sys_config->accept_non_directed_frames = 1;
9663 sys_config->exclude_unicast_unencrypted = 0;
9664 sys_config->disable_unicast_decryption = 1;
9665 sys_config->exclude_multicast_unencrypted = 0;
9666 sys_config->disable_multicast_decryption = 1;
9667 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
9668 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9669 sys_config->dot11g_auto_detection = 0;
9670 sys_config->enable_cts_to_self = 0;
9671 sys_config->bt_coexist_collision_thr = 0;
9672         sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
9673 }
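/* init_sys_config() only stages defaults in memory; they take effect once
 * ipw_send_system_config() uploads the structure to the firmware (see
 * ipw_config() below). */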
9674
9675 static int ipw_net_open(struct net_device *dev)
9676 {
9677 struct ipw_priv *priv = ieee80211_priv(dev);
9678 IPW_DEBUG_INFO("dev->open\n");
9679 /* we should be verifying the device is ready to be opened */
9680 down(&priv->sem);
9681 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9682 (priv->status & STATUS_ASSOCIATED))
9683 netif_start_queue(dev);
9684 up(&priv->sem);
9685 return 0;
9686 }
9687
9688 static int ipw_net_stop(struct net_device *dev)
9689 {
9690 IPW_DEBUG_INFO("dev->close\n");
9691 netif_stop_queue(dev);
9692 return 0;
9693 }
9694
9695 /*
9696 todo:
9697
9698 Modify to send one TFD per fragment instead of using chunking; otherwise
9699 we need to heavily modify ieee80211_skb_to_txb().
9700 */
9701
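/*
 * Build a transmit frame descriptor (TFD) for the txb handed down from the
 * ieee80211 layer: pick the per-priority tx queue, fill in the station,
 * security and rate flags, DMA-map up to NUM_TFD_CHUNKS - 2 fragments
 * directly (any remaining fragments are coalesced into one freshly
 * allocated skb), then advance the queue write index to kick the DMA
 * engine.  Returns NETDEV_TX_BUSY when the queue has no room so the stack
 * requeues the packet.
 */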
9702 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9703 int pri)
9704 {
9705 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
9706 txb->fragments[0]->data;
9707 int i = 0;
9708 struct tfd_frame *tfd;
9709 #ifdef CONFIG_IPW_QOS
9710 int tx_id = ipw_get_tx_queue_number(priv, pri);
9711 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9712 #else
9713 struct clx2_tx_queue *txq = &priv->txq[0];
9714 #endif
9715 struct clx2_queue *q = &txq->q;
9716 u8 id, hdr_len, unicast;
9717 u16 remaining_bytes;
9718 int fc;
9719
9720 /* If there isn't room in the queue, we return busy and let the
9721 * network stack requeue the packet for us */
9722 if (ipw_queue_space(q) < q->high_mark)
9723 return NETDEV_TX_BUSY;
9724
9725 switch (priv->ieee->iw_mode) {
9726 case IW_MODE_ADHOC:
9727 hdr_len = IEEE80211_3ADDR_LEN;
9728 unicast = !is_multicast_ether_addr(hdr->addr1);
9729 id = ipw_find_station(priv, hdr->addr1);
9730 if (id == IPW_INVALID_STATION) {
9731 id = ipw_add_station(priv, hdr->addr1);
9732 if (id == IPW_INVALID_STATION) {
9733 IPW_WARNING("Attempt to send data to "
9734 "invalid cell: " MAC_FMT "\n",
9735 MAC_ARG(hdr->addr1));
9736 goto drop;
9737 }
9738 }
9739 break;
9740
9741 case IW_MODE_INFRA:
9742 default:
9743 unicast = !is_multicast_ether_addr(hdr->addr3);
9744 hdr_len = IEEE80211_3ADDR_LEN;
9745 id = 0;
9746 break;
9747 }
9748
9749 tfd = &txq->bd[q->first_empty];
9750 txq->txb[q->first_empty] = txb;
9751 memset(tfd, 0, sizeof(*tfd));
9752 tfd->u.data.station_number = id;
9753
9754 tfd->control_flags.message_type = TX_FRAME_TYPE;
9755 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9756
9757 tfd->u.data.cmd_id = DINO_CMD_TX;
9758 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9759 remaining_bytes = txb->payload_size;
9760
9761 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9762 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9763 else
9764 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9765
9766 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9767 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9768
9769 fc = le16_to_cpu(hdr->frame_ctl);
9770 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9771
9772 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9773
9774 if (likely(unicast))
9775 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9776
9777 if (txb->encrypted && !priv->ieee->host_encrypt) {
9778 switch (priv->ieee->sec.level) {
9779 case SEC_LEVEL_3:
9780 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9781 IEEE80211_FCTL_PROTECTED;
9782 /* XXX: ACK flag must be set for CCMP even if it
9783 * is a multicast/broadcast packet, because CCMP
9784 * group communication encrypted by GTK is
9785 * actually done by the AP. */
9786 if (!unicast)
9787 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9788
9789 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9790 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9791 tfd->u.data.key_index = 0;
9792 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9793 break;
9794 case SEC_LEVEL_2:
9795 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9796 IEEE80211_FCTL_PROTECTED;
9797 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9798 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9799 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9800 break;
9801 case SEC_LEVEL_1:
9802 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9803 IEEE80211_FCTL_PROTECTED;
9804 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9805 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9806 40)
9807 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9808 else
9809 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9810 break;
9811 case SEC_LEVEL_0:
9812 break;
9813 default:
9814                         printk(KERN_ERR "Unknown security level %d\n",
9815 priv->ieee->sec.level);
9816 break;
9817 }
9818 } else
9819 /* No hardware encryption */
9820 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9821
9822 #ifdef CONFIG_IPW_QOS
9823 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
9824 #endif /* CONFIG_IPW_QOS */
9825
9826 /* payload */
9827 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9828 txb->nr_frags));
9829 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9830 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9831 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9832 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9833 i, le32_to_cpu(tfd->u.data.num_chunks),
9834 txb->fragments[i]->len - hdr_len);
9835 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9836                              i, le32_to_cpu(tfd->u.data.num_chunks),
9837 txb->fragments[i]->len - hdr_len);
9838 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9839 txb->fragments[i]->len - hdr_len);
9840
9841 tfd->u.data.chunk_ptr[i] =
9842 cpu_to_le32(pci_map_single
9843 (priv->pci_dev,
9844 txb->fragments[i]->data + hdr_len,
9845 txb->fragments[i]->len - hdr_len,
9846 PCI_DMA_TODEVICE));
9847 tfd->u.data.chunk_len[i] =
9848 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9849 }
9850
9851 if (i != txb->nr_frags) {
9852 struct sk_buff *skb;
9853 u16 remaining_bytes = 0;
9854 int j;
9855
9856 for (j = i; j < txb->nr_frags; j++)
9857 remaining_bytes += txb->fragments[j]->len - hdr_len;
9858
9859 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9860 remaining_bytes);
9861 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9862 if (skb != NULL) {
9863 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9864 for (j = i; j < txb->nr_frags; j++) {
9865 int size = txb->fragments[j]->len - hdr_len;
9866
9867 printk(KERN_INFO "Adding frag %d %d...\n",
9868 j, size);
9869 memcpy(skb_put(skb, size),
9870 txb->fragments[j]->data + hdr_len, size);
9871 }
9872 dev_kfree_skb_any(txb->fragments[i]);
9873 txb->fragments[i] = skb;
9874 tfd->u.data.chunk_ptr[i] =
9875 cpu_to_le32(pci_map_single
9876 (priv->pci_dev, skb->data,
9877 tfd->u.data.chunk_len[i],
9878 PCI_DMA_TODEVICE));
9879
9880 tfd->u.data.num_chunks =
9881 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
9882 1);
9883 }
9884 }
9885
9886 /* kick DMA */
9887 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9888 ipw_write32(priv, q->reg_w, q->first_empty);
9889
9890 return NETDEV_TX_OK;
9891
9892 drop:
9893 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9894 ieee80211_txb_free(txb);
9895 return NETDEV_TX_OK;
9896 }
9897
9898 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9899 {
9900 struct ipw_priv *priv = ieee80211_priv(dev);
9901 #ifdef CONFIG_IPW_QOS
9902 int tx_id = ipw_get_tx_queue_number(priv, pri);
9903 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9904 #else
9905 struct clx2_tx_queue *txq = &priv->txq[0];
9906 #endif /* CONFIG_IPW_QOS */
9907
9908 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9909 return 1;
9910
9911 return 0;
9912 }
9913
9914 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
9915 struct net_device *dev, int pri)
9916 {
9917 struct ipw_priv *priv = ieee80211_priv(dev);
9918 unsigned long flags;
9919 int ret;
9920
9921 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
9922 spin_lock_irqsave(&priv->lock, flags);
9923
9924 if (!(priv->status & STATUS_ASSOCIATED)) {
9925 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
9926 priv->ieee->stats.tx_carrier_errors++;
9927 netif_stop_queue(dev);
9928 goto fail_unlock;
9929 }
9930
9931 ret = ipw_tx_skb(priv, txb, pri);
9932 if (ret == NETDEV_TX_OK)
9933 __ipw_led_activity_on(priv);
9934 spin_unlock_irqrestore(&priv->lock, flags);
9935
9936 return ret;
9937
9938 fail_unlock:
9939 spin_unlock_irqrestore(&priv->lock, flags);
9940 return 1;
9941 }
9942
9943 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9944 {
9945 struct ipw_priv *priv = ieee80211_priv(dev);
9946
9947 priv->ieee->stats.tx_packets = priv->tx_packets;
9948 priv->ieee->stats.rx_packets = priv->rx_packets;
9949 return &priv->ieee->stats;
9950 }
9951
9952 static void ipw_net_set_multicast_list(struct net_device *dev)
9953 {
9954
9955 }
9956
9957 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9958 {
9959 struct ipw_priv *priv = ieee80211_priv(dev);
9960 struct sockaddr *addr = p;
9961 if (!is_valid_ether_addr(addr->sa_data))
9962 return -EADDRNOTAVAIL;
9963 down(&priv->sem);
9964 priv->config |= CFG_CUSTOM_MAC;
9965 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9966 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9967 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9968 queue_work(priv->workqueue, &priv->adapter_restart);
9969 up(&priv->sem);
9970 return 0;
9971 }
9972
9973 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9974 struct ethtool_drvinfo *info)
9975 {
9976 struct ipw_priv *p = ieee80211_priv(dev);
9977 char vers[64];
9978 char date[32];
9979 u32 len;
9980
9981 strcpy(info->driver, DRV_NAME);
9982 strcpy(info->version, DRV_VERSION);
9983
9984 len = sizeof(vers);
9985 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9986 len = sizeof(date);
9987 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9988
9989 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9990 vers, date);
9991 strcpy(info->bus_info, pci_name(p->pci_dev));
9992 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9993 }
9994
9995 static u32 ipw_ethtool_get_link(struct net_device *dev)
9996 {
9997 struct ipw_priv *priv = ieee80211_priv(dev);
9998 return (priv->status & STATUS_ASSOCIATED) != 0;
9999 }
10000
10001 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10002 {
10003 return IPW_EEPROM_IMAGE_SIZE;
10004 }
10005
10006 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10007 struct ethtool_eeprom *eeprom, u8 * bytes)
10008 {
10009 struct ipw_priv *p = ieee80211_priv(dev);
10010
10011 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10012 return -EINVAL;
10013 down(&p->sem);
10014 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10015 up(&p->sem);
10016 return 0;
10017 }
10018
10019 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10020 struct ethtool_eeprom *eeprom, u8 * bytes)
10021 {
10022 struct ipw_priv *p = ieee80211_priv(dev);
10023 int i;
10024
10025 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10026 return -EINVAL;
10027 down(&p->sem);
10028 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10029 for (i = IPW_EEPROM_DATA;
10030 i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++)
10031 ipw_write8(p, i, p->eeprom[i]);
10032 up(&p->sem);
10033 return 0;
10034 }
10035
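/* Rough sketch of how these ethtool_ops are exercised from userspace
 * (interface name is hypothetical):
 *   ethtool -i eth1    # driver/version/firmware strings via get_drvinfo
 *   ethtool -e eth1    # dump of the cached EEPROM image via get_eeprom
 * Writes through set_eeprom patch priv->eeprom and are mirrored to the
 * NIC with ipw_write8(). */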
10036 static struct ethtool_ops ipw_ethtool_ops = {
10037 .get_link = ipw_ethtool_get_link,
10038 .get_drvinfo = ipw_ethtool_get_drvinfo,
10039 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10040 .get_eeprom = ipw_ethtool_get_eeprom,
10041 .set_eeprom = ipw_ethtool_set_eeprom,
10042 };
10043
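/*
 * Primary interrupt handler (top half): bail out early on shared-IRQ
 * noise, mask and ack the hardware interrupts, cache INTA for the bottom
 * half and defer the real work to ipw_irq_tasklet.
 */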
10044 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
10045 {
10046 struct ipw_priv *priv = data;
10047 u32 inta, inta_mask;
10048
10049 if (!priv)
10050 return IRQ_NONE;
10051
10052 spin_lock(&priv->lock);
10053
10054 if (!(priv->status & STATUS_INT_ENABLED)) {
10055 /* Shared IRQ */
10056 goto none;
10057 }
10058
10059 inta = ipw_read32(priv, IPW_INTA_RW);
10060 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10061
10062 if (inta == 0xFFFFFFFF) {
10063 /* Hardware disappeared */
10064 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10065 goto none;
10066 }
10067
10068 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10069 /* Shared interrupt */
10070 goto none;
10071 }
10072
10073 /* tell the device to stop sending interrupts */
10074 ipw_disable_interrupts(priv);
10075
10076 /* ack current interrupts */
10077 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10078 ipw_write32(priv, IPW_INTA_RW, inta);
10079
10080 /* Cache INTA value for our tasklet */
10081 priv->isr_inta = inta;
10082
10083 tasklet_schedule(&priv->irq_tasklet);
10084
10085 spin_unlock(&priv->lock);
10086
10087 return IRQ_HANDLED;
10088 none:
10089 spin_unlock(&priv->lock);
10090 return IRQ_NONE;
10091 }
10092
10093 static void ipw_rf_kill(void *adapter)
10094 {
10095 struct ipw_priv *priv = adapter;
10096 unsigned long flags;
10097
10098 spin_lock_irqsave(&priv->lock, flags);
10099
10100 if (rf_kill_active(priv)) {
10101 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10102 if (priv->workqueue)
10103 queue_delayed_work(priv->workqueue,
10104 &priv->rf_kill, 2 * HZ);
10105 goto exit_unlock;
10106 }
10107
10108 /* RF Kill is now disabled, so bring the device back up */
10109
10110 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10111 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10112 "device\n");
10113
10114                 /* we cannot do an adapter restart while inside an irq lock */
10115 queue_work(priv->workqueue, &priv->adapter_restart);
10116 } else
10117 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10118 "enabled\n");
10119
10120 exit_unlock:
10121 spin_unlock_irqrestore(&priv->lock, flags);
10122 }
10123
10124 static void ipw_bg_rf_kill(void *data)
10125 {
10126 struct ipw_priv *priv = data;
10127 down(&priv->sem);
10128 ipw_rf_kill(data);
10129 up(&priv->sem);
10130 }
10131
10132 static void ipw_link_up(struct ipw_priv *priv)
10133 {
10134 priv->last_seq_num = -1;
10135 priv->last_frag_num = -1;
10136 priv->last_packet_time = 0;
10137
10138 netif_carrier_on(priv->net_dev);
10139 if (netif_queue_stopped(priv->net_dev)) {
10140 IPW_DEBUG_NOTIF("waking queue\n");
10141 netif_wake_queue(priv->net_dev);
10142 } else {
10143 IPW_DEBUG_NOTIF("starting queue\n");
10144 netif_start_queue(priv->net_dev);
10145 }
10146
10147 cancel_delayed_work(&priv->request_scan);
10148 ipw_reset_stats(priv);
10149 /* Ensure the rate is updated immediately */
10150 priv->last_rate = ipw_get_current_rate(priv);
10151 ipw_gather_stats(priv);
10152 ipw_led_link_up(priv);
10153 notify_wx_assoc_event(priv);
10154
10155 if (priv->config & CFG_BACKGROUND_SCAN)
10156 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10157 }
10158
10159 static void ipw_bg_link_up(void *data)
10160 {
10161 struct ipw_priv *priv = data;
10162 down(&priv->sem);
10163 ipw_link_up(data);
10164 up(&priv->sem);
10165 }
10166
10167 static void ipw_link_down(struct ipw_priv *priv)
10168 {
10169 ipw_led_link_down(priv);
10170 netif_carrier_off(priv->net_dev);
10171 netif_stop_queue(priv->net_dev);
10172 notify_wx_assoc_event(priv);
10173
10174 /* Cancel any queued work ... */
10175 cancel_delayed_work(&priv->request_scan);
10176 cancel_delayed_work(&priv->adhoc_check);
10177 cancel_delayed_work(&priv->gather_stats);
10178
10179 ipw_reset_stats(priv);
10180
10181 if (!(priv->status & STATUS_EXIT_PENDING)) {
10182 /* Queue up another scan... */
10183 queue_work(priv->workqueue, &priv->request_scan);
10184 }
10185 }
10186
10187 static void ipw_bg_link_down(void *data)
10188 {
10189 struct ipw_priv *priv = data;
10190 down(&priv->sem);
10191 ipw_link_down(data);
10192 up(&priv->sem);
10193 }
10194
10195 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10196 {
10197 int ret = 0;
10198
10199 priv->workqueue = create_workqueue(DRV_NAME);
10200 init_waitqueue_head(&priv->wait_command_queue);
10201 init_waitqueue_head(&priv->wait_state);
10202
10203 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10204 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10205 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10206 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10207 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10208 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10209 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10210 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10211 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10212 INIT_WORK(&priv->request_scan,
10213 (void (*)(void *))ipw_request_scan, priv);
10214 INIT_WORK(&priv->gather_stats,
10215 (void (*)(void *))ipw_bg_gather_stats, priv);
10216 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10217 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10218 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10219 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10220 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10221 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10222 priv);
10223 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10224 priv);
10225 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10226 priv);
10227 INIT_WORK(&priv->merge_networks,
10228 (void (*)(void *))ipw_merge_adhoc_network, priv);
10229
10230 #ifdef CONFIG_IPW_QOS
10231 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10232 priv);
10233 #endif /* CONFIG_IPW_QOS */
10234
10235 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10236 ipw_irq_tasklet, (unsigned long)priv);
10237
10238 return ret;
10239 }
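/* All of the work items initialized above run on the driver-private
 * workqueue created at the top of this function; ipw_pci_remove() cancels
 * the delayed entries and destroys the workqueue on unload. */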
10240
10241 static void shim__set_security(struct net_device *dev,
10242 struct ieee80211_security *sec)
10243 {
10244 struct ipw_priv *priv = ieee80211_priv(dev);
10245 int i;
10246 for (i = 0; i < 4; i++) {
10247 if (sec->flags & (1 << i)) {
10248 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10249 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10250 if (sec->key_sizes[i] == 0)
10251 priv->ieee->sec.flags &= ~(1 << i);
10252 else {
10253 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10254 sec->key_sizes[i]);
10255 priv->ieee->sec.flags |= (1 << i);
10256 }
10257 priv->status |= STATUS_SECURITY_UPDATED;
10258 } else if (sec->level != SEC_LEVEL_1)
10259 priv->ieee->sec.flags &= ~(1 << i);
10260 }
10261
10262 if (sec->flags & SEC_ACTIVE_KEY) {
10263 if (sec->active_key <= 3) {
10264 priv->ieee->sec.active_key = sec->active_key;
10265 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10266 } else
10267 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10268 priv->status |= STATUS_SECURITY_UPDATED;
10269 } else
10270 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10271
10272 if ((sec->flags & SEC_AUTH_MODE) &&
10273 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10274 priv->ieee->sec.auth_mode = sec->auth_mode;
10275 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10276 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10277 priv->capability |= CAP_SHARED_KEY;
10278 else
10279 priv->capability &= ~CAP_SHARED_KEY;
10280 priv->status |= STATUS_SECURITY_UPDATED;
10281 }
10282
10283 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10284 priv->ieee->sec.flags |= SEC_ENABLED;
10285 priv->ieee->sec.enabled = sec->enabled;
10286 priv->status |= STATUS_SECURITY_UPDATED;
10287 if (sec->enabled)
10288 priv->capability |= CAP_PRIVACY_ON;
10289 else
10290 priv->capability &= ~CAP_PRIVACY_ON;
10291 }
10292
10293 if (sec->flags & SEC_ENCRYPT)
10294 priv->ieee->sec.encrypt = sec->encrypt;
10295
10296 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10297 priv->ieee->sec.level = sec->level;
10298 priv->ieee->sec.flags |= SEC_LEVEL;
10299 priv->status |= STATUS_SECURITY_UPDATED;
10300 }
10301
10302 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10303 ipw_set_hwcrypto_keys(priv);
10304
10305         /* To match the current functionality of ipw2100 (which works well
10306          * with various supplicants), we don't force a disassociation if the
10307          * privacy capability changes ... */
10308 #if 0
10309 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10310 (((priv->assoc_request.capability &
10311 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10312 (!(priv->assoc_request.capability &
10313 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10314 IPW_DEBUG_ASSOC("Disassociating due to capability "
10315 "change.\n");
10316 ipw_disassociate(priv);
10317 }
10318 #endif
10319 }
10320
10321 static int init_supported_rates(struct ipw_priv *priv,
10322 struct ipw_supported_rates *rates)
10323 {
10324 /* TODO: Mask out rates based on priv->rates_mask */
10325
10326 memset(rates, 0, sizeof(*rates));
10327 /* configure supported rates */
10328 switch (priv->ieee->freq_band) {
10329 case IEEE80211_52GHZ_BAND:
10330 rates->ieee_mode = IPW_A_MODE;
10331 rates->purpose = IPW_RATE_CAPABILITIES;
10332 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10333 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10334 break;
10335
10336         default:                /* Mixed or 2.4GHz */
10337 rates->ieee_mode = IPW_G_MODE;
10338 rates->purpose = IPW_RATE_CAPABILITIES;
10339 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10340 IEEE80211_CCK_DEFAULT_RATES_MASK);
10341 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10342 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10343 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10344 }
10345 break;
10346 }
10347
10348 return 0;
10349 }
10350
10351 static int ipw_config(struct ipw_priv *priv)
10352 {
10353 /* This is only called from ipw_up, which resets/reloads the firmware
10354 so, we don't need to first disable the card before we configure
10355 it */
10356 if (ipw_set_tx_power(priv))
10357 goto error;
10358
10359 /* initialize adapter address */
10360 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10361 goto error;
10362
10363 /* set basic system config settings */
10364 init_sys_config(&priv->sys_config);
10365 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10366 priv->sys_config.answer_broadcast_ssid_probe = 1;
10367 else
10368 priv->sys_config.answer_broadcast_ssid_probe = 0;
10369
10370 if (ipw_send_system_config(priv, &priv->sys_config))
10371 goto error;
10372
10373 init_supported_rates(priv, &priv->rates);
10374 if (ipw_send_supported_rates(priv, &priv->rates))
10375 goto error;
10376
10377 /* Set request-to-send threshold */
10378 if (priv->rts_threshold) {
10379 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10380 goto error;
10381 }
10382 #ifdef CONFIG_IPW_QOS
10383 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10384 ipw_qos_activate(priv, NULL);
10385 #endif /* CONFIG_IPW_QOS */
10386
10387 if (ipw_set_random_seed(priv))
10388 goto error;
10389
10390 /* final state transition to the RUN state */
10391 if (ipw_send_host_complete(priv))
10392 goto error;
10393
10394 priv->status |= STATUS_INIT;
10395
10396 ipw_led_init(priv);
10397 ipw_led_radio_on(priv);
10398 priv->notif_missed_beacons = 0;
10399
10400 /* Set hardware WEP key if it is configured. */
10401 if ((priv->capability & CAP_PRIVACY_ON) &&
10402 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10403 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10404 ipw_set_hwcrypto_keys(priv);
10405
10406 return 0;
10407
10408 error:
10409 return -EIO;
10410 }
10411
10412 /*
10413 * NOTE:
10414 *
10415 * These tables have been tested in conjunction with the
10416 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10417 *
10418  * Altering these values, using them on other hardware, or using them
10419  * in geographies not intended for resale of the above-mentioned Intel
10420  * adapters has not been tested.
10421 *
10422 */
10423 static const struct ieee80211_geo ipw_geos[] = {
10424 { /* Restricted */
10425 "---",
10426 .bg_channels = 11,
10427 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10428 {2427, 4}, {2432, 5}, {2437, 6},
10429 {2442, 7}, {2447, 8}, {2452, 9},
10430 {2457, 10}, {2462, 11}},
10431 },
10432
10433 { /* Custom US/Canada */
10434 "ZZF",
10435 .bg_channels = 11,
10436 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10437 {2427, 4}, {2432, 5}, {2437, 6},
10438 {2442, 7}, {2447, 8}, {2452, 9},
10439 {2457, 10}, {2462, 11}},
10440 .a_channels = 8,
10441 .a = {{5180, 36},
10442 {5200, 40},
10443 {5220, 44},
10444 {5240, 48},
10445 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10446 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10447 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10448 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10449 },
10450
10451 { /* Rest of World */
10452 "ZZD",
10453 .bg_channels = 13,
10454 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10455 {2427, 4}, {2432, 5}, {2437, 6},
10456 {2442, 7}, {2447, 8}, {2452, 9},
10457 {2457, 10}, {2462, 11}, {2467, 12},
10458 {2472, 13}},
10459 },
10460
10461 { /* Custom USA & Europe & High */
10462 "ZZA",
10463 .bg_channels = 11,
10464 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10465 {2427, 4}, {2432, 5}, {2437, 6},
10466 {2442, 7}, {2447, 8}, {2452, 9},
10467 {2457, 10}, {2462, 11}},
10468 .a_channels = 13,
10469 .a = {{5180, 36},
10470 {5200, 40},
10471 {5220, 44},
10472 {5240, 48},
10473 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10474 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10475 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10476 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10477 {5745, 149},
10478 {5765, 153},
10479 {5785, 157},
10480 {5805, 161},
10481 {5825, 165}},
10482 },
10483
10484 { /* Custom NA & Europe */
10485 "ZZB",
10486 .bg_channels = 11,
10487 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10488 {2427, 4}, {2432, 5}, {2437, 6},
10489 {2442, 7}, {2447, 8}, {2452, 9},
10490 {2457, 10}, {2462, 11}},
10491 .a_channels = 13,
10492 .a = {{5180, 36},
10493 {5200, 40},
10494 {5220, 44},
10495 {5240, 48},
10496 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10497 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10498 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10499 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10500 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10501 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10502 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10503 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10504 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10505 },
10506
10507 { /* Custom Japan */
10508 "ZZC",
10509 .bg_channels = 11,
10510 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10511 {2427, 4}, {2432, 5}, {2437, 6},
10512 {2442, 7}, {2447, 8}, {2452, 9},
10513 {2457, 10}, {2462, 11}},
10514 .a_channels = 4,
10515 .a = {{5170, 34}, {5190, 38},
10516 {5210, 42}, {5230, 46}},
10517 },
10518
10519 { /* Custom */
10520 "ZZM",
10521 .bg_channels = 11,
10522 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10523 {2427, 4}, {2432, 5}, {2437, 6},
10524 {2442, 7}, {2447, 8}, {2452, 9},
10525 {2457, 10}, {2462, 11}},
10526 },
10527
10528 { /* Europe */
10529 "ZZE",
10530 .bg_channels = 13,
10531 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10532 {2427, 4}, {2432, 5}, {2437, 6},
10533 {2442, 7}, {2447, 8}, {2452, 9},
10534 {2457, 10}, {2462, 11}, {2467, 12},
10535 {2472, 13}},
10536 .a_channels = 19,
10537 .a = {{5180, 36},
10538 {5200, 40},
10539 {5220, 44},
10540 {5240, 48},
10541 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10542 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10543 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10544 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10545 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10546 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10547 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10548 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10549 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10550 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10551 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10552 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10553 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10554 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10555 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10556 },
10557
10558 { /* Custom Japan */
10559 "ZZJ",
10560 .bg_channels = 14,
10561 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10562 {2427, 4}, {2432, 5}, {2437, 6},
10563 {2442, 7}, {2447, 8}, {2452, 9},
10564 {2457, 10}, {2462, 11}, {2467, 12},
10565 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10566 .a_channels = 4,
10567 .a = {{5170, 34}, {5190, 38},
10568 {5210, 42}, {5230, 46}},
10569 },
10570
10571 { /* Rest of World */
10572 "ZZR",
10573 .bg_channels = 14,
10574 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10575 {2427, 4}, {2432, 5}, {2437, 6},
10576 {2442, 7}, {2447, 8}, {2452, 9},
10577 {2457, 10}, {2462, 11}, {2467, 12},
10578 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10579 IEEE80211_CH_PASSIVE_ONLY}},
10580 },
10581
10582 { /* High Band */
10583 "ZZH",
10584 .bg_channels = 13,
10585 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10586 {2427, 4}, {2432, 5}, {2437, 6},
10587 {2442, 7}, {2447, 8}, {2452, 9},
10588 {2457, 10}, {2462, 11},
10589 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10590 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10591 .a_channels = 4,
10592 .a = {{5745, 149}, {5765, 153},
10593 {5785, 157}, {5805, 161}},
10594 },
10595
10596 { /* Custom Europe */
10597 "ZZG",
10598 .bg_channels = 13,
10599 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10600 {2427, 4}, {2432, 5}, {2437, 6},
10601 {2442, 7}, {2447, 8}, {2452, 9},
10602 {2457, 10}, {2462, 11},
10603 {2467, 12}, {2472, 13}},
10604 .a_channels = 4,
10605 .a = {{5180, 36}, {5200, 40},
10606 {5220, 44}, {5240, 48}},
10607 },
10608
10609 { /* Europe */
10610 "ZZK",
10611 .bg_channels = 13,
10612 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10613 {2427, 4}, {2432, 5}, {2437, 6},
10614 {2442, 7}, {2447, 8}, {2452, 9},
10615 {2457, 10}, {2462, 11},
10616 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10617 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10618 .a_channels = 24,
10619 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10620 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10621 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10622 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10623 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10624 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10625 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10626 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10627 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10628 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10629 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10630 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10631 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10632 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10633 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10634 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10635 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10636 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10637 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10638 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10639 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10640 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10641 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10642 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10643 },
10644
10645 { /* Europe */
10646 "ZZL",
10647 .bg_channels = 11,
10648 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10649 {2427, 4}, {2432, 5}, {2437, 6},
10650 {2442, 7}, {2447, 8}, {2452, 9},
10651 {2457, 10}, {2462, 11}},
10652 .a_channels = 13,
10653 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10654 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10655 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10656 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10657 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10658 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10659 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10660 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10661 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10662 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10663 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10664 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10665 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10666 }
10667 };
10668
10669 /* GEO code borrowed from ieee80211_geo.c */
10670 static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
10671 {
10672 int i;
10673
10674 /* Driver needs to initialize the geography map before using
10675 * these helper functions */
10676 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10677
10678 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10679 for (i = 0; i < ieee->geo.bg_channels; i++)
10680 /* NOTE: If G mode is currently supported but
10681 * this is a B only channel, we don't see it
10682 * as valid. */
10683 if ((ieee->geo.bg[i].channel == channel) &&
10684 (!(ieee->mode & IEEE_G) ||
10685 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
10686 return IEEE80211_24GHZ_BAND;
10687
10688 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10689 for (i = 0; i < ieee->geo.a_channels; i++)
10690 if (ieee->geo.a[i].channel == channel)
10691 return IEEE80211_52GHZ_BAND;
10692
10693 return 0;
10694 }
10695
10696 static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel)
10697 {
10698 int i;
10699
10700 /* Driver needs to initialize the geography map before using
10701 * these helper functions */
10702 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10703
10704 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10705 for (i = 0; i < ieee->geo.bg_channels; i++)
10706 if (ieee->geo.bg[i].channel == channel)
10707 return i;
10708
10709 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10710 for (i = 0; i < ieee->geo.a_channels; i++)
10711 if (ieee->geo.a[i].channel == channel)
10712 return i;
10713
10714 return -1;
10715 }
10716
10717 static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq)
10718 {
10719 int i;
10720
10721 /* Driver needs to initialize the geography map before using
10722 * these helper functions */
10723 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10724
10725 freq /= 100000;
10726
10727 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10728 for (i = 0; i < ieee->geo.bg_channels; i++)
10729 if (ieee->geo.bg[i].freq == freq)
10730 return ieee->geo.bg[i].channel;
10731
10732 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10733 for (i = 0; i < ieee->geo.a_channels; i++)
10734 if (ieee->geo.a[i].freq == freq)
10735 return ieee->geo.a[i].channel;
10736
10737 return 0;
10738 }
10739
10740 static int ipw_set_geo(struct ieee80211_device *ieee,
10741 const struct ieee80211_geo *geo)
10742 {
10743 memcpy(ieee->geo.name, geo->name, 3);
10744 ieee->geo.name[3] = '\0';
10745 ieee->geo.bg_channels = geo->bg_channels;
10746 ieee->geo.a_channels = geo->a_channels;
10747 memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
10748 sizeof(struct ieee80211_channel));
10749 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
10750 sizeof(struct ieee80211_channel));
10751 return 0;
10752 }
10753
10754 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee)
10755 {
10756 return &ieee->geo;
10757 }
10758
10759 #define MAX_HW_RESTARTS 5
10760 static int ipw_up(struct ipw_priv *priv)
10761 {
10762 int rc, i, j;
10763
10764 if (priv->status & STATUS_EXIT_PENDING)
10765 return -EIO;
10766
10767 if (cmdlog && !priv->cmdlog) {
10768 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10769 GFP_KERNEL);
10770 if (priv->cmdlog == NULL) {
10771 IPW_ERROR("Error allocating %d command log entries.\n",
10772 cmdlog);
10773 } else {
10774 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10775 priv->cmdlog_len = cmdlog;
10776 }
10777 }
10778
10779 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10780 /* Load the microcode, firmware, and eeprom.
10781 * Also start the clocks. */
10782 rc = ipw_load(priv);
10783 if (rc) {
10784 IPW_ERROR("Unable to load firmware: %d\n", rc);
10785 return rc;
10786 }
10787
10788 ipw_init_ordinals(priv);
10789 if (!(priv->config & CFG_CUSTOM_MAC))
10790 eeprom_parse_mac(priv, priv->mac_addr);
10791 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10792
10793 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10794 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10795 ipw_geos[j].name, 3))
10796 break;
10797 }
10798 if (j == ARRAY_SIZE(ipw_geos)) {
10799 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10800 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10801 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10802 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10803 j = 0;
10804 }
10805 if (ipw_set_geo(priv->ieee, &ipw_geos[j])) {
10806 IPW_WARNING("Could not set geography.");
10807 return 0;
10808 }
10809
10810 IPW_DEBUG_INFO("Geography %03d [%s] detected.\n",
10811 j, priv->ieee->geo.name);
10812
10813 if (priv->status & STATUS_RF_KILL_SW) {
10814 IPW_WARNING("Radio disabled by module parameter.\n");
10815 return 0;
10816 } else if (rf_kill_active(priv)) {
10817 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10818 "Kill switch must be turned off for "
10819 "wireless networking to work.\n");
10820 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10821 2 * HZ);
10822 return 0;
10823 }
10824
10825 rc = ipw_config(priv);
10826 if (!rc) {
10827 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10828
10829                         /* If configured to try to auto-associate, kick
10830                          * off a scan. */
10831 queue_work(priv->workqueue, &priv->request_scan);
10832
10833 return 0;
10834 }
10835
10836 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10837 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10838 i, MAX_HW_RESTARTS);
10839
10840 /* We had an error bringing up the hardware, so take it
10841 * all the way back down so we can try again */
10842 ipw_down(priv);
10843 }
10844
10845         /* We tried to restart and configure the device for as long as our
10846          * patience allowed */
10847 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10848
10849 return -EIO;
10850 }
10851
10852 static void ipw_bg_up(void *data)
10853 {
10854 struct ipw_priv *priv = data;
10855 down(&priv->sem);
10856 ipw_up(data);
10857 up(&priv->sem);
10858 }
10859
10860 static void ipw_deinit(struct ipw_priv *priv)
10861 {
10862 int i;
10863
10864 if (priv->status & STATUS_SCANNING) {
10865 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
10866 ipw_abort_scan(priv);
10867 }
10868
10869 if (priv->status & STATUS_ASSOCIATED) {
10870 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
10871 ipw_disassociate(priv);
10872 }
10873
10874 ipw_led_shutdown(priv);
10875
10876 /* Wait up to 1s for status to change to not scanning and not
10877          * associated (disassociation can take a while for a full 802.11
10878          * exchange) */
10879 for (i = 1000; i && (priv->status &
10880 (STATUS_DISASSOCIATING |
10881 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
10882 udelay(10);
10883
10884 if (priv->status & (STATUS_DISASSOCIATING |
10885 STATUS_ASSOCIATED | STATUS_SCANNING))
10886 IPW_DEBUG_INFO("Still associated or scanning...\n");
10887 else
10888 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
10889
10890 /* Attempt to disable the card */
10891 ipw_send_card_disable(priv, 0);
10892
10893 priv->status &= ~STATUS_INIT;
10894 }
10895
10896 static void ipw_down(struct ipw_priv *priv)
10897 {
10898 int exit_pending = priv->status & STATUS_EXIT_PENDING;
10899
10900 priv->status |= STATUS_EXIT_PENDING;
10901
10902 if (ipw_is_init(priv))
10903 ipw_deinit(priv);
10904
10905 /* Wipe out the EXIT_PENDING status bit if we are not actually
10906 * exiting the module */
10907 if (!exit_pending)
10908 priv->status &= ~STATUS_EXIT_PENDING;
10909
10910 /* tell the device to stop sending interrupts */
10911 ipw_disable_interrupts(priv);
10912
10913 /* Clear all bits but the RF Kill */
10914 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
10915 netif_carrier_off(priv->net_dev);
10916 netif_stop_queue(priv->net_dev);
10917
10918 ipw_stop_nic(priv);
10919
10920 ipw_led_radio_off(priv);
10921 }
10922
10923 static void ipw_bg_down(void *data)
10924 {
10925 struct ipw_priv *priv = data;
10926 down(&priv->sem);
10927 ipw_down(data);
10928 up(&priv->sem);
10929 }
10930
10931 /* Called by register_netdev() */
10932 static int ipw_net_init(struct net_device *dev)
10933 {
10934 struct ipw_priv *priv = ieee80211_priv(dev);
10935 down(&priv->sem);
10936
10937 if (ipw_up(priv)) {
10938 up(&priv->sem);
10939 return -EIO;
10940 }
10941
10942 up(&priv->sem);
10943 return 0;
10944 }
10945
10946 /* PCI driver stuff */
10947 static struct pci_device_id card_ids[] = {
10948 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
10949 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
10950 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
10951 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
10952 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
10953 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
10954 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
10955 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
10956 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
10957 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
10958 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
10959 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
10960 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
10961 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
10962 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
10963 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
10964 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
10965 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
10966 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10967 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10968 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10969 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10970
10971 /* required last entry */
10972 {0,}
10973 };
10974
10975 MODULE_DEVICE_TABLE(pci, card_ids);
10976
10977 static struct attribute *ipw_sysfs_entries[] = {
10978 &dev_attr_rf_kill.attr,
10979 &dev_attr_direct_dword.attr,
10980 &dev_attr_indirect_byte.attr,
10981 &dev_attr_indirect_dword.attr,
10982 &dev_attr_mem_gpio_reg.attr,
10983 &dev_attr_command_event_reg.attr,
10984 &dev_attr_nic_type.attr,
10985 &dev_attr_status.attr,
10986 &dev_attr_cfg.attr,
10987 &dev_attr_error.attr,
10988 &dev_attr_event_log.attr,
10989 &dev_attr_cmd_log.attr,
10990 &dev_attr_eeprom_delay.attr,
10991 &dev_attr_ucode_version.attr,
10992 &dev_attr_rtc.attr,
10993 &dev_attr_scan_age.attr,
10994 &dev_attr_led.attr,
10995 &dev_attr_speed_scan.attr,
10996 &dev_attr_net_stats.attr,
10997 NULL
10998 };
10999
11000 static struct attribute_group ipw_attribute_group = {
11001 .name = NULL, /* put in device directory */
11002 .attrs = ipw_sysfs_entries,
11003 };
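/* With .name == NULL the group is created directly in the PCI device's
 * sysfs directory by ipw_pci_probe(), e.g. (bus address is illustrative)
 * /sys/bus/pci/devices/0000:02:00.0/rf_kill. */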
11004
11005 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11006 {
11007 int err = 0;
11008 struct net_device *net_dev;
11009 void __iomem *base;
11010 u32 length, val;
11011 struct ipw_priv *priv;
11012 int i;
11013
11014 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11015 if (net_dev == NULL) {
11016 err = -ENOMEM;
11017 goto out;
11018 }
11019
11020 priv = ieee80211_priv(net_dev);
11021 priv->ieee = netdev_priv(net_dev);
11022
11023 priv->net_dev = net_dev;
11024 priv->pci_dev = pdev;
11025 #ifdef CONFIG_IPW2200_DEBUG
11026 ipw_debug_level = debug;
11027 #endif
11028 spin_lock_init(&priv->lock);
11029 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11030 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11031
11032 init_MUTEX(&priv->sem);
11033 if (pci_enable_device(pdev)) {
11034 err = -ENODEV;
11035 goto out_free_ieee80211;
11036 }
11037
11038 pci_set_master(pdev);
11039
11040 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11041 if (!err)
11042 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11043 if (err) {
11044 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11045 goto out_pci_disable_device;
11046 }
11047
11048 pci_set_drvdata(pdev, priv);
11049
11050 err = pci_request_regions(pdev, DRV_NAME);
11051 if (err)
11052 goto out_pci_disable_device;
11053
11054 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11055 * PCI Tx retries from interfering with C3 CPU state */
11056 pci_read_config_dword(pdev, 0x40, &val);
11057 if ((val & 0x0000ff00) != 0)
11058 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11059
11060 length = pci_resource_len(pdev, 0);
11061 priv->hw_len = length;
11062
11063 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11064 if (!base) {
11065 err = -ENODEV;
11066 goto out_pci_release_regions;
11067 }
11068
11069 priv->hw_base = base;
11070 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11071 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11072
11073 err = ipw_setup_deferred_work(priv);
11074 if (err) {
11075 IPW_ERROR("Unable to setup deferred work\n");
11076 goto out_iounmap;
11077 }
11078
11079 ipw_sw_reset(priv, 1);
11080
11081 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
11082 if (err) {
11083 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11084 goto out_destroy_workqueue;
11085 }
11086
11087 SET_MODULE_OWNER(net_dev);
11088 SET_NETDEV_DEV(net_dev, &pdev->dev);
11089
11090 down(&priv->sem);
11091
11092 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11093 priv->ieee->set_security = shim__set_security;
11094 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11095
11096 #ifdef CONFIG_IPW_QOS
11097 priv->ieee->handle_probe_response = ipw_handle_beacon;
11098 priv->ieee->handle_beacon = ipw_handle_probe_response;
11099 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11100 #endif /* CONFIG_IPW_QOS */
11101
11102 priv->ieee->perfect_rssi = -20;
11103 priv->ieee->worst_rssi = -85;
11104
11105 net_dev->open = ipw_net_open;
11106 net_dev->stop = ipw_net_stop;
11107 net_dev->init = ipw_net_init;
11108 net_dev->get_stats = ipw_net_get_stats;
11109 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11110 net_dev->set_mac_address = ipw_net_set_mac_address;
11111 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11112 net_dev->wireless_data = &priv->wireless_data;
11113 net_dev->wireless_handlers = &ipw_wx_handler_def;
11114 net_dev->ethtool_ops = &ipw_ethtool_ops;
11115 net_dev->irq = pdev->irq;
11116 net_dev->base_addr = (unsigned long)priv->hw_base;
11117 net_dev->mem_start = pci_resource_start(pdev, 0);
11118 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11119
11120 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11121 if (err) {
11122 IPW_ERROR("failed to create sysfs device attributes\n");
11123 up(&priv->sem);
11124 goto out_release_irq;
11125 }
11126
11127 up(&priv->sem);
11128 err = register_netdev(net_dev);
11129 if (err) {
11130 IPW_ERROR("failed to register network device\n");
11131 goto out_remove_sysfs;
11132 }
11133 return 0;
11134
11135 out_remove_sysfs:
11136 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11137 out_release_irq:
11138 free_irq(pdev->irq, priv);
11139 out_destroy_workqueue:
11140 destroy_workqueue(priv->workqueue);
11141 priv->workqueue = NULL;
11142 out_iounmap:
11143 iounmap(priv->hw_base);
11144 out_pci_release_regions:
11145 pci_release_regions(pdev);
11146 out_pci_disable_device:
11147 pci_disable_device(pdev);
11148 pci_set_drvdata(pdev, NULL);
11149 out_free_ieee80211:
11150 free_ieee80211(priv->net_dev);
11151 out:
11152 return err;
11153 }
11154
11155 static void ipw_pci_remove(struct pci_dev *pdev)
11156 {
11157 struct ipw_priv *priv = pci_get_drvdata(pdev);
11158 struct list_head *p, *q;
11159 int i;
11160
11161 if (!priv)
11162 return;
11163
11164 down(&priv->sem);
11165
11166 priv->status |= STATUS_EXIT_PENDING;
11167 ipw_down(priv);
11168 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11169
11170 up(&priv->sem);
11171
11172 unregister_netdev(priv->net_dev);
11173
11174 if (priv->rxq) {
11175 ipw_rx_queue_free(priv, priv->rxq);
11176 priv->rxq = NULL;
11177 }
11178 ipw_tx_queue_free(priv);
11179
11180 if (priv->cmdlog) {
11181 kfree(priv->cmdlog);
11182 priv->cmdlog = NULL;
11183 }
11184 /* ipw_down will ensure that there is no more pending work
11185          * in the workqueues, so we can safely remove them now. */
11186 cancel_delayed_work(&priv->adhoc_check);
11187 cancel_delayed_work(&priv->gather_stats);
11188 cancel_delayed_work(&priv->request_scan);
11189 cancel_delayed_work(&priv->rf_kill);
11190 cancel_delayed_work(&priv->scan_check);
11191 destroy_workqueue(priv->workqueue);
11192 priv->workqueue = NULL;
11193
11194 /* Free MAC hash list for ADHOC */
11195 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11196 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11197 list_del(p);
11198 kfree(list_entry(p, struct ipw_ibss_seq, list));
11199 }
11200 }
11201
11202 if (priv->error) {
11203 ipw_free_error_log(priv->error);
11204 priv->error = NULL;
11205 }
11206
11207 free_irq(pdev->irq, priv);
11208 iounmap(priv->hw_base);
11209 pci_release_regions(pdev);
11210 pci_disable_device(pdev);
11211 pci_set_drvdata(pdev, NULL);
11212 free_ieee80211(priv->net_dev);
11213 free_firmware();
11214 }
11215
11216 #ifdef CONFIG_PM
11217 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11218 {
11219 struct ipw_priv *priv = pci_get_drvdata(pdev);
11220 struct net_device *dev = priv->net_dev;
11221
11222 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11223
11224 /* Take down the device; powers it off, etc. */
11225 ipw_down(priv);
11226
11227 /* Remove the PRESENT state of the device */
11228 netif_device_detach(dev);
11229
11230 pci_save_state(pdev);
11231 pci_disable_device(pdev);
11232 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11233
11234 return 0;
11235 }
11236
11237 static int ipw_pci_resume(struct pci_dev *pdev)
11238 {
11239 struct ipw_priv *priv = pci_get_drvdata(pdev);
11240 struct net_device *dev = priv->net_dev;
11241 u32 val;
11242
11243 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11244
11245 pci_set_power_state(pdev, PCI_D0);
11246 pci_enable_device(pdev);
11247 pci_restore_state(pdev);
11248
11249 /*
11250 * Suspend/Resume resets the PCI configuration space, so we have to
11251 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11252 * from interfering with C3 CPU state. pci_restore_state won't help
11253 * here since it only restores the first 64 bytes of the PCI config header.
11254 */
11255 pci_read_config_dword(pdev, 0x40, &val);
11256 if ((val & 0x0000ff00) != 0)
11257 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11258
11259 /* Set the device back into the PRESENT state; this will also wake
11260 * the queue if needed. */
11261 netif_device_attach(dev);
11262
11263 /* Bring the device back up */
11264 queue_work(priv->workqueue, &priv->up);
11265
11266 return 0;
11267 }
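/* The RETRY_TIMEOUT fixup above is a PCI config-space quirk that
 * pci_restore_state() cannot handle.  A minimal sketch of how it could be
 * factored into a shared helper (hypothetical name, not part of this
 * driver):
 *
 *	static void ipw_clear_retry_timeout(struct pci_dev *pdev)
 *	{
 *		u32 val;
 *
 *		pci_read_config_dword(pdev, 0x40, &val);
 *		if ((val & 0x0000ff00) != 0)
 *			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 *	}
 */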
11268 #endif
11269
11270 /* Driver registration and module entry/exit points */
11271 static struct pci_driver ipw_driver = {
11272 .name = DRV_NAME,
11273 .id_table = card_ids,
11274 .probe = ipw_pci_probe,
11275 .remove = __devexit_p(ipw_pci_remove),
11276 #ifdef CONFIG_PM
11277 .suspend = ipw_pci_suspend,
11278 .resume = ipw_pci_resume,
11279 #endif
11280 };
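/* Note: __devexit_p() evaluates to the remove routine when hotplug support
 * is built in and to NULL otherwise, so kernels of this vintage can discard
 * the __devexit code on static configurations. */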
11281
11282 static int __init ipw_init(void)
11283 {
11284 int ret;
11285
11286 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11287 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11288
11289 ret = pci_module_init(&ipw_driver);
11290 if (ret) {
11291 IPW_ERROR("Unable to register PCI driver\n");
11292 return ret;
11293 }
11294
11295 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11296 if (ret) {
11297 IPW_ERROR("Unable to create driver sysfs file\n");
11298 pci_unregister_driver(&ipw_driver);
11299 return ret;
11300 }
11301
11302 return ret;
11303 }
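/* Note: pci_module_init() is the older spelling of pci_register_driver() in
 * this kernel generation; the debug_level attribute created here typically
 * shows up as /sys/bus/pci/drivers/ipw2200/debug_level. */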
11304
11305 static void __exit ipw_exit(void)
11306 {
11307 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11308 pci_unregister_driver(&ipw_driver);
11309 }
11310
11311 module_param(disable, int, 0444);
11312 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11313
11314 module_param(associate, int, 0444);
11315 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11316
11317 module_param(auto_create, int, 0444);
11318 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11319
11320 module_param(led, int, 0444);
11321 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11322
11323 module_param(debug, int, 0444);
11324 MODULE_PARM_DESC(debug, "debug output mask");
11325
11326 module_param(channel, int, 0444);
11327 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
11328
11329 #ifdef CONFIG_IPW_QOS
11330 module_param(qos_enable, int, 0444);
11331 MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
11332
11333 module_param(qos_burst_enable, int, 0444);
11334 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11335
11336 module_param(qos_no_ack_mask, int, 0444);
11337 MODULE_PARM_DESC(qos_no_ack_mask, "bitmask of TX queues to send with no ACK");
11338
11339 module_param(burst_duration_CCK, int, 0444);
11340 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst duration");
11341
11342 module_param(burst_duration_OFDM, int, 0444);
11343 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst duration");
11344 #endif /* CONFIG_IPW_QOS */
11345
11346 #ifdef CONFIG_IPW2200_MONITOR
11347 module_param(mode, int, 0444);
11348 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11349 #else
11350 module_param(mode, int, 0444);
11351 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11352 #endif
11353
11354 module_param(hwcrypto, int, 0444);
11355 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default on)");
11356
11357 module_param(cmdlog, int, 0444);
11358 MODULE_PARM_DESC(cmdlog,
11359 "allocate a ring buffer for logging firmware commands");
11360
11361 module_exit(ipw_exit);
11362 module_init(ipw_init);