1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
85 static int cmdlog = 0;
86 #ifdef CONFIG_IPW2200_DEBUG
87 static int debug = 0;
88 #endif
89 static int channel = 0;
90 static int mode = 0;
91
92 static u32 ipw_debug_level;
93 static int associate = 1;
94 static int auto_create = 1;
95 static int led = 0;
96 static int disable = 0;
97 static int bt_coexist = 0;
98 static int hwcrypto = 0;
99 static int roaming = 1;
100 static const char ipw_modes[] = {
101 'a', 'b', 'g', '?'
102 };
103 static int antenna = CFG_SYS_ANTENNA_BOTH;
104
105 #ifdef CONFIG_IPW2200_PROMISCUOUS
106 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
107 #endif
108
109
110 #ifdef CONFIG_IPW2200_QOS
111 static int qos_enable = 0;
112 static int qos_burst_enable = 0;
113 static int qos_no_ack_mask = 0;
114 static int burst_duration_CCK = 0;
115 static int burst_duration_OFDM = 0;
116
117 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
118 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
119 QOS_TX3_CW_MIN_OFDM},
120 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
121 QOS_TX3_CW_MAX_OFDM},
122 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
123 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
124 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
125 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
126 };
127
128 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
129 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
130 QOS_TX3_CW_MIN_CCK},
131 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
132 QOS_TX3_CW_MAX_CCK},
133 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
134 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
135 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
136 QOS_TX3_TXOP_LIMIT_CCK}
137 };
138
139 static struct ieee80211_qos_parameters def_parameters_OFDM = {
140 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
141 DEF_TX3_CW_MIN_OFDM},
142 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
143 DEF_TX3_CW_MAX_OFDM},
144 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
145 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
146 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
147 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
148 };
149
150 static struct ieee80211_qos_parameters def_parameters_CCK = {
151 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
152 DEF_TX3_CW_MIN_CCK},
153 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
154 DEF_TX3_CW_MAX_CCK},
155 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
156 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
157 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
158 DEF_TX3_TXOP_LIMIT_CCK}
159 };
160
161 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
162
163 static int from_priority_to_tx_queue[] = {
164 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
165 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
166 };
167
168 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
169
170 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
171 *qos_param);
172 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
173 *qos_param);
174 #endif /* CONFIG_IPW2200_QOS */
175
176 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
177 static void ipw_remove_current_network(struct ipw_priv *priv);
178 static void ipw_rx(struct ipw_priv *priv);
179 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
180 struct clx2_tx_queue *txq, int qindex);
181 static int ipw_queue_reset(struct ipw_priv *priv);
182
183 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
184 int len, int sync);
185
186 static void ipw_tx_queue_free(struct ipw_priv *);
187
188 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
189 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
190 static void ipw_rx_queue_replenish(void *);
191 static int ipw_up(struct ipw_priv *);
192 static void ipw_bg_up(void *);
193 static void ipw_down(struct ipw_priv *);
194 static void ipw_bg_down(void *);
195 static int ipw_config(struct ipw_priv *);
196 static int init_supported_rates(struct ipw_priv *priv,
197 struct ipw_supported_rates *prates);
198 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
199 static void ipw_send_wep_keys(struct ipw_priv *, int);
200
201 static int snprint_line(char *buf, size_t count,
202 const u8 * data, u32 len, u32 ofs)
203 {
204 int out, i, j, l;
205 char c;
206
207 out = snprintf(buf, count, "%08X", ofs);
208
209 for (l = 0, i = 0; i < 2; i++) {
210 out += snprintf(buf + out, count - out, " ");
211 for (j = 0; j < 8 && l < len; j++, l++)
212 out += snprintf(buf + out, count - out, "%02X ",
213 data[(i * 8 + j)]);
214 for (; j < 8; j++)
215 out += snprintf(buf + out, count - out, " ");
216 }
217
218 out += snprintf(buf + out, count - out, " ");
219 for (l = 0, i = 0; i < 2; i++) {
220 out += snprintf(buf + out, count - out, " ");
221 for (j = 0; j < 8 && l < len; j++, l++) {
222 c = data[(i * 8 + j)];
223 if (!isascii(c) || !isprint(c))
224 c = '.';
225
226 out += snprintf(buf + out, count - out, "%c", c);
227 }
228
229 for (; j < 8; j++)
230 out += snprintf(buf + out, count - out, " ");
231 }
232
233 return out;
234 }
235
236 static void printk_buf(int level, const u8 * data, u32 len)
237 {
238 char line[81];
239 u32 ofs = 0;
240 if (!(ipw_debug_level & level))
241 return;
242
243 while (len) {
244 snprint_line(line, sizeof(line), &data[ofs],
245 min(len, 16U), ofs);
246 printk(KERN_DEBUG "%s\n", line);
247 ofs += 16;
248 len -= min(len, 16U);
249 }
250 }
251
252 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
253 {
254 size_t out = size;
255 u32 ofs = 0;
256 int total = 0;
257
258 while (size && len) {
259 out = snprint_line(output, size, &data[ofs],
260 min_t(size_t, len, 16U), ofs);
261
262 ofs += 16;
263 output += out;
264 size -= out;
265 len -= min_t(size_t, len, 16U);
266 total += out;
267 }
268 return total;
269 }
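
/*
 * Illustrative sketch only (not part of the original driver): how the
 * hex-dump helpers above are typically used.  Each formatted line carries
 * an offset, up to 16 hex bytes in two groups of eight, and a printable
 * ASCII column (non-printables rendered as '.').  The example assumes the
 * priv->eeprom cache declared in ipw2200.h and the IPW_DL_INFO debug level.
 */
#if 0	/* example usage only */
static void example_dump_eeprom(struct ipw_priv *priv)
{
	/* Dump the cached EEPROM image to the kernel log when the
	 * IPW_DL_INFO debug level is enabled. */
	printk_buf(IPW_DL_INFO, priv->eeprom, sizeof(priv->eeprom));
}
#endif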
270
271 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
272 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
273 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
274
275 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
276 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
277 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
278
279 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
280 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
281 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
282 {
283 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
284 __LINE__, (u32) (b), (u32) (c));
285 _ipw_write_reg8(a, b, c);
286 }
287
288 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
289 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
290 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
291 {
292 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
293 __LINE__, (u32) (b), (u32) (c));
294 _ipw_write_reg16(a, b, c);
295 }
296
297 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
298 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
299 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
300 {
301 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
302 __LINE__, (u32) (b), (u32) (c));
303 _ipw_write_reg32(a, b, c);
304 }
305
306 /* 8-bit direct write (low 4K) */
307 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
308
309 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
310 #define ipw_write8(ipw, ofs, val) \
311 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
312 _ipw_write8(ipw, ofs, val)
313
314 /* 16-bit direct write (low 4K) */
315 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
316
317 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
318 #define ipw_write16(ipw, ofs, val) \
319 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
320 _ipw_write16(ipw, ofs, val)
321
322 /* 32-bit direct write (low 4K) */
323 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
324
325 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
326 #define ipw_write32(ipw, ofs, val) \
327 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
328 _ipw_write32(ipw, ofs, val)
329
330 /* 8-bit direct read (low 4K) */
331 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
332
333 /* 8-bit direct read (low 4K), with debug wrapper */
334 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
335 {
336 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
337 return _ipw_read8(ipw, ofs);
338 }
339
340 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
341 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
342
343 /* 16-bit direct read (low 4K) */
344 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
345
346 /* 16-bit direct read (low 4K), with debug wrapper */
347 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
348 {
349 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
350 return _ipw_read16(ipw, ofs);
351 }
352
353 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
354 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
355
356 /* 32-bit direct read (low 4K) */
357 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
358
359 /* 32-bit direct read (low 4K), with debug wrapper */
360 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
361 {
362 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
363 return _ipw_read32(ipw, ofs);
364 }
365
366 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
367 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
368
369 /* multi-byte read (above 4K), with debug wrapper */
370 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
371 static inline void __ipw_read_indirect(const char *f, int l,
372 struct ipw_priv *a, u32 b, u8 * c, int d)
373 {
374 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
375 d);
376 _ipw_read_indirect(a, b, c, d);
377 }
378
379 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
380 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
381
383 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
383 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
384 int num);
385 #define ipw_write_indirect(a, b, c, d) \
386 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
387 _ipw_write_indirect(a, b, c, d)
388
389 /* 32-bit indirect write (above 4K) */
390 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
391 {
392 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
393 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
394 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 }
396
397 /* 8-bit indirect write (above 4K) */
398 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
399 {
400 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
401 u32 dif_len = reg - aligned_addr;
402
403 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
404 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
405 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 }
407
408 /* 16-bit indirect write (above 4K) */
409 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
410 {
411 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
412 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
413
414 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
415 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
416 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 }
418
419 /* 8-bit indirect read (above 4K) */
420 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
421 {
422 u32 word;
423 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
424 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
425 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
426 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 }
428
429 /* 32-bit indirect read (above 4K) */
430 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
431 {
432 u32 value;
433
434 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
435
436 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
437 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
438 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
439 return value;
440 }
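
/*
 * Sketch (illustrative only, not part of the original driver): the
 * indirect access pattern the helpers above implement.  Registers and
 * SRAM beyond the first 4K window are reached by latching the target
 * address into IPW_INDIRECT_ADDR and then transferring data through
 * IPW_INDIRECT_DATA in the low-4K window.
 */
#if 0	/* example only -- equivalent to ipw_read_reg32(priv, IPW_EVENT_REG) */
static u32 example_read_event_reg(struct ipw_priv *priv)
{
	/* Latch the (dword-aligned) target address, then read the data
	 * port through the direct low-4K window. */
	_ipw_write32(priv, IPW_INDIRECT_ADDR, IPW_EVENT_REG);
	return _ipw_read32(priv, IPW_INDIRECT_DATA);
}
#endif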
441
442 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
443 /* for area above 1st 4K of SRAM/reg space */
444 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
445 int num)
446 {
447 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
448 u32 dif_len = addr - aligned_addr;
449 u32 i;
450
451 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
452
453 if (num <= 0) {
454 return;
455 }
456
457 /* Read the first dword (or portion) byte by byte */
458 if (unlikely(dif_len)) {
459 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
460 /* Start reading at aligned_addr + dif_len */
461 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
462 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
463 aligned_addr += 4;
464 }
465
466 /* Read all of the middle dwords as dwords, with auto-increment */
467 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
468 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
469 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
470
471 /* Read the last dword (or portion) byte by byte */
472 if (unlikely(num)) {
473 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
474 for (i = 0; num > 0; i++, num--)
475 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
476 }
477 }
478
479 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
480 /* for area above 1st 4K of SRAM/reg space */
481 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
482 int num)
483 {
484 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
485 u32 dif_len = addr - aligned_addr;
486 u32 i;
487
488 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
489
490 if (num <= 0) {
491 return;
492 }
493
494 /* Write the first dword (or portion) byte by byte */
495 if (unlikely(dif_len)) {
496 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
497 /* Start writing at aligned_addr + dif_len */
498 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
499 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
500 aligned_addr += 4;
501 }
502
503 /* Write all of the middle dwords as dwords, with auto-increment */
504 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
505 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
506 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
507
508 /* Write the last dword (or portion) byte by byte */
509 if (unlikely(num)) {
510 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
511 for (i = 0; num > 0; i++, num--, buf++)
512 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
513 }
514 }
515
516 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
517 /* for 1st 4K of SRAM/regs space */
518 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
519 int num)
520 {
521 memcpy_toio((priv->hw_base + addr), buf, num);
522 }
523
524 /* Set bit(s) in low 4K of SRAM/regs */
525 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
526 {
527 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 }
529
530 /* Clear bit(s) in low 4K of SRAM/regs */
531 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
532 {
533 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 }
535
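/* Note: the __ variants below assume the caller already holds
 * priv->irq_lock; the ipw_enable_interrupts()/ipw_disable_interrupts()
 * wrappers further down take the lock themselves. */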
536 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
537 {
538 if (priv->status & STATUS_INT_ENABLED)
539 return;
540 priv->status |= STATUS_INT_ENABLED;
541 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 }
543
544 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
545 {
546 if (!(priv->status & STATUS_INT_ENABLED))
547 return;
548 priv->status &= ~STATUS_INT_ENABLED;
549 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 }
551
552 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
553 {
554 unsigned long flags;
555
556 spin_lock_irqsave(&priv->irq_lock, flags);
557 __ipw_enable_interrupts(priv);
558 spin_unlock_irqrestore(&priv->irq_lock, flags);
559 }
560
561 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
562 {
563 unsigned long flags;
564
565 spin_lock_irqsave(&priv->irq_lock, flags);
566 __ipw_disable_interrupts(priv);
567 spin_unlock_irqrestore(&priv->irq_lock, flags);
568 }
569
570 #ifdef CONFIG_IPW2200_DEBUG
571 static char *ipw_error_desc(u32 val)
572 {
573 switch (val) {
574 case IPW_FW_ERROR_OK:
575 return "ERROR_OK";
576 case IPW_FW_ERROR_FAIL:
577 return "ERROR_FAIL";
578 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
579 return "MEMORY_UNDERFLOW";
580 case IPW_FW_ERROR_MEMORY_OVERFLOW:
581 return "MEMORY_OVERFLOW";
582 case IPW_FW_ERROR_BAD_PARAM:
583 return "BAD_PARAM";
584 case IPW_FW_ERROR_BAD_CHECKSUM:
585 return "BAD_CHECKSUM";
586 case IPW_FW_ERROR_NMI_INTERRUPT:
587 return "NMI_INTERRUPT";
588 case IPW_FW_ERROR_BAD_DATABASE:
589 return "BAD_DATABASE";
590 case IPW_FW_ERROR_ALLOC_FAIL:
591 return "ALLOC_FAIL";
592 case IPW_FW_ERROR_DMA_UNDERRUN:
593 return "DMA_UNDERRUN";
594 case IPW_FW_ERROR_DMA_STATUS:
595 return "DMA_STATUS";
596 case IPW_FW_ERROR_DINO_ERROR:
597 return "DINO_ERROR";
598 case IPW_FW_ERROR_EEPROM_ERROR:
599 return "EEPROM_ERROR";
600 case IPW_FW_ERROR_SYSASSERT:
601 return "SYSASSERT";
602 case IPW_FW_ERROR_FATAL_ERROR:
603 return "FATAL_ERROR";
604 default:
605 return "UNKNOWN_ERROR";
606 }
607 }
608
609 static void ipw_dump_error_log(struct ipw_priv *priv,
610 struct ipw_fw_error *error)
611 {
612 u32 i;
613
614 if (!error) {
615 IPW_ERROR("Error allocating and capturing error log. "
616 "Nothing to dump.\n");
617 return;
618 }
619
620 IPW_ERROR("Start IPW Error Log Dump:\n");
621 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
622 error->status, error->config);
623
624 for (i = 0; i < error->elem_len; i++)
625 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
626 ipw_error_desc(error->elem[i].desc),
627 error->elem[i].time,
628 error->elem[i].blink1,
629 error->elem[i].blink2,
630 error->elem[i].link1,
631 error->elem[i].link2, error->elem[i].data);
632 for (i = 0; i < error->log_len; i++)
633 IPW_ERROR("%i\t0x%08x\t%i\n",
634 error->log[i].time,
635 error->log[i].data, error->log[i].event);
636 }
637 #endif
638
639 static inline int ipw_is_init(struct ipw_priv *priv)
640 {
641 return (priv->status & STATUS_INIT) ? 1 : 0;
642 }
643
644 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
645 {
646 u32 addr, field_info, field_len, field_count, total_len;
647
648 IPW_DEBUG_ORD("ordinal = %i\n", ord);
649
650 if (!priv || !val || !len) {
651 IPW_DEBUG_ORD("Invalid argument\n");
652 return -EINVAL;
653 }
654
655 /* verify device ordinal tables have been initialized */
656 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
657 IPW_DEBUG_ORD("Access ordinals before initialization\n");
658 return -EINVAL;
659 }
660
661 switch (IPW_ORD_TABLE_ID_MASK & ord) {
662 case IPW_ORD_TABLE_0_MASK:
663 /*
664 * TABLE 0: Direct access to a table of 32 bit values
665 *
666 * This is a very simple table with the data directly
667 * read from the table
668 */
669
670 /* remove the table id from the ordinal */
671 ord &= IPW_ORD_TABLE_VALUE_MASK;
672
673 /* boundary check */
674 if (ord > priv->table0_len) {
675 IPW_DEBUG_ORD("ordinal value (%i) longer then "
676 "max (%i)\n", ord, priv->table0_len);
677 return -EINVAL;
678 }
679
680 /* verify we have enough room to store the value */
681 if (*len < sizeof(u32)) {
682 IPW_DEBUG_ORD("ordinal buffer length too small, "
683 "need %zd\n", sizeof(u32));
684 return -EINVAL;
685 }
686
687 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
688 ord, priv->table0_addr + (ord << 2));
689
690 *len = sizeof(u32);
691 ord <<= 2;
692 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
693 break;
694
695 case IPW_ORD_TABLE_1_MASK:
696 /*
697 * TABLE 1: Indirect access to a table of 32 bit values
698 *
699 * This is a fairly large table of u32 values each
700 * representing starting addr for the data (which is
701 * also a u32)
702 */
703
704 /* remove the table id from the ordinal */
705 ord &= IPW_ORD_TABLE_VALUE_MASK;
706
707 /* boundary check */
708 if (ord > priv->table1_len) {
709 IPW_DEBUG_ORD("ordinal value too long\n");
710 return -EINVAL;
711 }
712
713 /* verify we have enough room to store the value */
714 if (*len < sizeof(u32)) {
715 IPW_DEBUG_ORD("ordinal buffer length too small, "
716 "need %zd\n", sizeof(u32));
717 return -EINVAL;
718 }
719
720 *((u32 *) val) =
721 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
722 *len = sizeof(u32);
723 break;
724
725 case IPW_ORD_TABLE_2_MASK:
726 /*
727 * TABLE 2: Indirect access to a table of variable sized values
728 *
729 * This table consists of six values, each containing
730 * - dword containing the starting offset of the data
731 * - dword containing the length in the first 16 bits
732 * and the count in the second 16 bits
733 */
734
735 /* remove the table id from the ordinal */
736 ord &= IPW_ORD_TABLE_VALUE_MASK;
737
738 /* boundary check */
739 if (ord > priv->table2_len) {
740 IPW_DEBUG_ORD("ordinal value too long\n");
741 return -EINVAL;
742 }
743
744 /* get the address of statistic */
745 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
746
747 /* get the second DW of statistics ;
748 * two 16-bit words - first is length, second is count */
749 field_info =
750 ipw_read_reg32(priv,
751 priv->table2_addr + (ord << 3) +
752 sizeof(u32));
753
754 /* get each entry length */
755 field_len = *((u16 *) & field_info);
756
757 /* get number of entries */
758 field_count = *(((u16 *) & field_info) + 1);
759
760 /* abort if not enough memory */
761 total_len = field_len * field_count;
762 if (total_len > *len) {
763 *len = total_len;
764 return -EINVAL;
765 }
766
767 *len = total_len;
768 if (!total_len)
769 return 0;
770
771 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
772 "field_info = 0x%08x\n",
773 addr, total_len, field_info);
774 ipw_read_indirect(priv, addr, val, total_len);
775 break;
776
777 default:
778 IPW_DEBUG_ORD("Invalid ordinal!\n");
779 return -EINVAL;
780
781 }
782
783 return 0;
784 }
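
/*
 * Minimal usage sketch (illustrative only): fetching a single 32-bit
 * statistic through the ordinal tables described above, following the
 * same pattern as the sysfs handlers later in this file.
 */
#if 0	/* example only */
static u32 example_read_rtc(struct ipw_priv *priv)
{
	u32 val = 0;
	u32 len = sizeof(val);

	/* ipw_get_ordinal() returns -EINVAL if the tables have not been
	 * initialized yet or the supplied buffer is too small. */
	if (ipw_get_ordinal(priv, IPW_ORD_STAT_RTC, &val, &len))
		return 0;

	return val;
}
#endif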
785
786 static void ipw_init_ordinals(struct ipw_priv *priv)
787 {
788 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
789 priv->table0_len = ipw_read32(priv, priv->table0_addr);
790
791 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
792 priv->table0_addr, priv->table0_len);
793
794 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
795 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
796
797 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
798 priv->table1_addr, priv->table1_len);
799
800 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
801 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
802 priv->table2_len &= 0x0000ffff; /* use first two bytes */
803
804 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
805 priv->table2_addr, priv->table2_len);
806
807 }
808
809 static u32 ipw_register_toggle(u32 reg)
810 {
811 reg &= ~IPW_START_STANDBY;
812 if (reg & IPW_GATE_ODMA)
813 reg &= ~IPW_GATE_ODMA;
814 if (reg & IPW_GATE_IDMA)
815 reg &= ~IPW_GATE_IDMA;
816 if (reg & IPW_GATE_ADMA)
817 reg &= ~IPW_GATE_ADMA;
818 return reg;
819 }
820
821 /*
822 * LED behavior:
823 * - On radio ON, turn on any LEDs that need to be on during start
824 * - On initialization, start unassociated blink
825 * - On association, disable unassociated blink
826 * - On disassociation, start unassociated blink
827 * - On radio OFF, turn off any LEDs started during radio on
828 *
829 */
830 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
831 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
832 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
833
834 static void ipw_led_link_on(struct ipw_priv *priv)
835 {
836 unsigned long flags;
837 u32 led;
838
839 /* If configured to not use LEDs, or nic_type is 1,
840 * then we don't toggle a LINK led */
841 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
842 return;
843
844 spin_lock_irqsave(&priv->lock, flags);
845
846 if (!(priv->status & STATUS_RF_KILL_MASK) &&
847 !(priv->status & STATUS_LED_LINK_ON)) {
848 IPW_DEBUG_LED("Link LED On\n");
849 led = ipw_read_reg32(priv, IPW_EVENT_REG);
850 led |= priv->led_association_on;
851
852 led = ipw_register_toggle(led);
853
854 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
855 ipw_write_reg32(priv, IPW_EVENT_REG, led);
856
857 priv->status |= STATUS_LED_LINK_ON;
858
859 /* If we aren't associated, schedule turning the LED off */
860 if (!(priv->status & STATUS_ASSOCIATED))
861 queue_delayed_work(priv->workqueue,
862 &priv->led_link_off,
863 LD_TIME_LINK_ON);
864 }
865
866 spin_unlock_irqrestore(&priv->lock, flags);
867 }
868
869 static void ipw_bg_led_link_on(void *data)
870 {
871 struct ipw_priv *priv = data;
872 mutex_lock(&priv->mutex);
873 ipw_led_link_on(data);
874 mutex_unlock(&priv->mutex);
875 }
876
877 static void ipw_led_link_off(struct ipw_priv *priv)
878 {
879 unsigned long flags;
880 u32 led;
881
882 /* If configured not to use LEDs, or nic type is 1,
883 * then we don't toggle the LINK led. */
884 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
885 return;
886
887 spin_lock_irqsave(&priv->lock, flags);
888
889 if (priv->status & STATUS_LED_LINK_ON) {
890 led = ipw_read_reg32(priv, IPW_EVENT_REG);
891 led &= priv->led_association_off;
892 led = ipw_register_toggle(led);
893
894 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
895 ipw_write_reg32(priv, IPW_EVENT_REG, led);
896
897 IPW_DEBUG_LED("Link LED Off\n");
898
899 priv->status &= ~STATUS_LED_LINK_ON;
900
901 /* If we aren't associated and the radio is on, schedule
902 * turning the LED on (blink while unassociated) */
903 if (!(priv->status & STATUS_RF_KILL_MASK) &&
904 !(priv->status & STATUS_ASSOCIATED))
905 queue_delayed_work(priv->workqueue, &priv->led_link_on,
906 LD_TIME_LINK_OFF);
907
908 }
909
910 spin_unlock_irqrestore(&priv->lock, flags);
911 }
912
913 static void ipw_bg_led_link_off(void *data)
914 {
915 struct ipw_priv *priv = data;
916 mutex_lock(&priv->mutex);
917 ipw_led_link_off(data);
918 mutex_unlock(&priv->mutex);
919 }
920
921 static void __ipw_led_activity_on(struct ipw_priv *priv)
922 {
923 u32 led;
924
925 if (priv->config & CFG_NO_LED)
926 return;
927
928 if (priv->status & STATUS_RF_KILL_MASK)
929 return;
930
931 if (!(priv->status & STATUS_LED_ACT_ON)) {
932 led = ipw_read_reg32(priv, IPW_EVENT_REG);
933 led |= priv->led_activity_on;
934
935 led = ipw_register_toggle(led);
936
937 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
938 ipw_write_reg32(priv, IPW_EVENT_REG, led);
939
940 IPW_DEBUG_LED("Activity LED On\n");
941
942 priv->status |= STATUS_LED_ACT_ON;
943
944 cancel_delayed_work(&priv->led_act_off);
945 queue_delayed_work(priv->workqueue, &priv->led_act_off,
946 LD_TIME_ACT_ON);
947 } else {
948 /* Reschedule LED off for full time period */
949 cancel_delayed_work(&priv->led_act_off);
950 queue_delayed_work(priv->workqueue, &priv->led_act_off,
951 LD_TIME_ACT_ON);
952 }
953 }
954
955 #if 0
956 void ipw_led_activity_on(struct ipw_priv *priv)
957 {
958 unsigned long flags;
959 spin_lock_irqsave(&priv->lock, flags);
960 __ipw_led_activity_on(priv);
961 spin_unlock_irqrestore(&priv->lock, flags);
962 }
963 #endif /* 0 */
964
965 static void ipw_led_activity_off(struct ipw_priv *priv)
966 {
967 unsigned long flags;
968 u32 led;
969
970 if (priv->config & CFG_NO_LED)
971 return;
972
973 spin_lock_irqsave(&priv->lock, flags);
974
975 if (priv->status & STATUS_LED_ACT_ON) {
976 led = ipw_read_reg32(priv, IPW_EVENT_REG);
977 led &= priv->led_activity_off;
978
979 led = ipw_register_toggle(led);
980
981 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
982 ipw_write_reg32(priv, IPW_EVENT_REG, led);
983
984 IPW_DEBUG_LED("Activity LED Off\n");
985
986 priv->status &= ~STATUS_LED_ACT_ON;
987 }
988
989 spin_unlock_irqrestore(&priv->lock, flags);
990 }
991
992 static void ipw_bg_led_activity_off(void *data)
993 {
994 struct ipw_priv *priv = data;
995 mutex_lock(&priv->mutex);
996 ipw_led_activity_off(data);
997 mutex_unlock(&priv->mutex);
998 }
999
1000 static void ipw_led_band_on(struct ipw_priv *priv)
1001 {
1002 unsigned long flags;
1003 u32 led;
1004
1005 /* Only nic type 1 supports mode LEDs */
1006 if (priv->config & CFG_NO_LED ||
1007 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1008 return;
1009
1010 spin_lock_irqsave(&priv->lock, flags);
1011
1012 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1013 if (priv->assoc_network->mode == IEEE_A) {
1014 led |= priv->led_ofdm_on;
1015 led &= priv->led_association_off;
1016 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1017 } else if (priv->assoc_network->mode == IEEE_G) {
1018 led |= priv->led_ofdm_on;
1019 led |= priv->led_association_on;
1020 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1021 } else {
1022 led &= priv->led_ofdm_off;
1023 led |= priv->led_association_on;
1024 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1025 }
1026
1027 led = ipw_register_toggle(led);
1028
1029 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1030 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1031
1032 spin_unlock_irqrestore(&priv->lock, flags);
1033 }
1034
1035 static void ipw_led_band_off(struct ipw_priv *priv)
1036 {
1037 unsigned long flags;
1038 u32 led;
1039
1040 /* Only nic type 1 supports mode LEDs */
1041 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1042 return;
1043
1044 spin_lock_irqsave(&priv->lock, flags);
1045
1046 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1047 led &= priv->led_ofdm_off;
1048 led &= priv->led_association_off;
1049
1050 led = ipw_register_toggle(led);
1051
1052 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1053 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1054
1055 spin_unlock_irqrestore(&priv->lock, flags);
1056 }
1057
1058 static void ipw_led_radio_on(struct ipw_priv *priv)
1059 {
1060 ipw_led_link_on(priv);
1061 }
1062
1063 static void ipw_led_radio_off(struct ipw_priv *priv)
1064 {
1065 ipw_led_activity_off(priv);
1066 ipw_led_link_off(priv);
1067 }
1068
1069 static void ipw_led_link_up(struct ipw_priv *priv)
1070 {
1071 /* Set the Link Led on for all nic types */
1072 ipw_led_link_on(priv);
1073 }
1074
1075 static void ipw_led_link_down(struct ipw_priv *priv)
1076 {
1077 ipw_led_activity_off(priv);
1078 ipw_led_link_off(priv);
1079
1080 if (priv->status & STATUS_RF_KILL_MASK)
1081 ipw_led_radio_off(priv);
1082 }
1083
1084 static void ipw_led_init(struct ipw_priv *priv)
1085 {
1086 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1087
1088 /* Set the default PINs for the link and activity leds */
1089 priv->led_activity_on = IPW_ACTIVITY_LED;
1090 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1091
1092 priv->led_association_on = IPW_ASSOCIATED_LED;
1093 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1094
1095 /* Set the default PINs for the OFDM leds */
1096 priv->led_ofdm_on = IPW_OFDM_LED;
1097 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1098
1099 switch (priv->nic_type) {
1100 case EEPROM_NIC_TYPE_1:
1101 /* In this NIC type, the LEDs are reversed.... */
1102 priv->led_activity_on = IPW_ASSOCIATED_LED;
1103 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1104 priv->led_association_on = IPW_ACTIVITY_LED;
1105 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1106
1107 if (!(priv->config & CFG_NO_LED))
1108 ipw_led_band_on(priv);
1109
1110 /* And we don't blink link LEDs for this nic, so
1111 * just return here */
1112 return;
1113
1114 case EEPROM_NIC_TYPE_3:
1115 case EEPROM_NIC_TYPE_2:
1116 case EEPROM_NIC_TYPE_4:
1117 case EEPROM_NIC_TYPE_0:
1118 break;
1119
1120 default:
1121 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1122 priv->nic_type);
1123 priv->nic_type = EEPROM_NIC_TYPE_0;
1124 break;
1125 }
1126
1127 if (!(priv->config & CFG_NO_LED)) {
1128 if (priv->status & STATUS_ASSOCIATED)
1129 ipw_led_link_on(priv);
1130 else
1131 ipw_led_link_off(priv);
1132 }
1133 }
1134
1135 static void ipw_led_shutdown(struct ipw_priv *priv)
1136 {
1137 ipw_led_activity_off(priv);
1138 ipw_led_link_off(priv);
1139 ipw_led_band_off(priv);
1140 cancel_delayed_work(&priv->led_link_on);
1141 cancel_delayed_work(&priv->led_link_off);
1142 cancel_delayed_work(&priv->led_act_off);
1143 }
1144
1145 /*
1146 * The following adds a new attribute to the sysfs representation
1147 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1148 * used for controlling the debug level.
1149 *
1150 * See the level definitions in ipw for details.
1151 */
1152 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1153 {
1154 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1155 }
1156
1157 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1158 size_t count)
1159 {
1160 char *p = (char *)buf;
1161 u32 val;
1162
1163 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1164 p++;
1165 if (p[0] == 'x' || p[0] == 'X')
1166 p++;
1167 val = simple_strtoul(p, &p, 16);
1168 } else
1169 val = simple_strtoul(p, &p, 10);
1170 if (p == buf)
1171 printk(KERN_INFO DRV_NAME
1172 ": %s is not in hex or decimal form.\n", buf);
1173 else
1174 ipw_debug_level = val;
1175
1176 return strnlen(buf, count);
1177 }
1178
1179 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1180 show_debug_level, store_debug_level);
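
/*
 * Usage note (illustrative, not from the original source): the attribute
 * declared above appears under the driver's sysfs directory (named after
 * DRV_NAME), e.g. something like:
 *
 *   cat /sys/bus/pci/drivers/ipw2200/debug_level
 *   echo 0x00000001 > /sys/bus/pci/drivers/ipw2200/debug_level
 *
 * store_debug_level() accepts hex input (with an optional 0x/x prefix)
 * or decimal, as implemented above.
 */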
1181
1182 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1183 {
1184 /* length = 1st dword in log */
1185 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1186 }
1187
1188 static void ipw_capture_event_log(struct ipw_priv *priv,
1189 u32 log_len, struct ipw_event *log)
1190 {
1191 u32 base;
1192
1193 if (log_len) {
1194 base = ipw_read32(priv, IPW_EVENT_LOG);
1195 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1196 (u8 *) log, sizeof(*log) * log_len);
1197 }
1198 }
1199
1200 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1201 {
1202 struct ipw_fw_error *error;
1203 u32 log_len = ipw_get_event_log_len(priv);
1204 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1205 u32 elem_len = ipw_read_reg32(priv, base);
1206
1207 error = kmalloc(sizeof(*error) +
1208 sizeof(*error->elem) * elem_len +
1209 sizeof(*error->log) * log_len, GFP_ATOMIC);
1210 if (!error) {
1211 IPW_ERROR("Memory allocation for firmware error log "
1212 "failed.\n");
1213 return NULL;
1214 }
1215 error->jiffies = jiffies;
1216 error->status = priv->status;
1217 error->config = priv->config;
1218 error->elem_len = elem_len;
1219 error->log_len = log_len;
1220 error->elem = (struct ipw_error_elem *)error->payload;
1221 error->log = (struct ipw_event *)(error->elem + elem_len);
1222
1223 ipw_capture_event_log(priv, log_len, error->log);
1224
1225 if (elem_len)
1226 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1227 sizeof(*error->elem) * elem_len);
1228
1229 return error;
1230 }
1231
1232 static ssize_t show_event_log(struct device *d,
1233 struct device_attribute *attr, char *buf)
1234 {
1235 struct ipw_priv *priv = dev_get_drvdata(d);
1236 u32 log_len = ipw_get_event_log_len(priv);
1237 struct ipw_event log[log_len];
1238 u32 len = 0, i;
1239
1240 ipw_capture_event_log(priv, log_len, log);
1241
1242 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1243 for (i = 0; i < log_len; i++)
1244 len += snprintf(buf + len, PAGE_SIZE - len,
1245 "\n%08X%08X%08X",
1246 log[i].time, log[i].event, log[i].data);
1247 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1248 return len;
1249 }
1250
1251 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1252
1253 static ssize_t show_error(struct device *d,
1254 struct device_attribute *attr, char *buf)
1255 {
1256 struct ipw_priv *priv = dev_get_drvdata(d);
1257 u32 len = 0, i;
1258 if (!priv->error)
1259 return 0;
1260 len += snprintf(buf + len, PAGE_SIZE - len,
1261 "%08lX%08X%08X%08X",
1262 priv->error->jiffies,
1263 priv->error->status,
1264 priv->error->config, priv->error->elem_len);
1265 for (i = 0; i < priv->error->elem_len; i++)
1266 len += snprintf(buf + len, PAGE_SIZE - len,
1267 "\n%08X%08X%08X%08X%08X%08X%08X",
1268 priv->error->elem[i].time,
1269 priv->error->elem[i].desc,
1270 priv->error->elem[i].blink1,
1271 priv->error->elem[i].blink2,
1272 priv->error->elem[i].link1,
1273 priv->error->elem[i].link2,
1274 priv->error->elem[i].data);
1275
1276 len += snprintf(buf + len, PAGE_SIZE - len,
1277 "\n%08X", priv->error->log_len);
1278 for (i = 0; i < priv->error->log_len; i++)
1279 len += snprintf(buf + len, PAGE_SIZE - len,
1280 "\n%08X%08X%08X",
1281 priv->error->log[i].time,
1282 priv->error->log[i].event,
1283 priv->error->log[i].data);
1284 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1285 return len;
1286 }
1287
1288 static ssize_t clear_error(struct device *d,
1289 struct device_attribute *attr,
1290 const char *buf, size_t count)
1291 {
1292 struct ipw_priv *priv = dev_get_drvdata(d);
1293
1294 kfree(priv->error);
1295 priv->error = NULL;
1296 return count;
1297 }
1298
1299 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1300
1301 static ssize_t show_cmd_log(struct device *d,
1302 struct device_attribute *attr, char *buf)
1303 {
1304 struct ipw_priv *priv = dev_get_drvdata(d);
1305 u32 len = 0, i;
1306 if (!priv->cmdlog)
1307 return 0;
1308 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1309 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1310 i = (i + 1) % priv->cmdlog_len) {
1311 len +=
1312 snprintf(buf + len, PAGE_SIZE - len,
1313 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1314 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1315 priv->cmdlog[i].cmd.len);
1316 len +=
1317 snprintk_buf(buf + len, PAGE_SIZE - len,
1318 (u8 *) priv->cmdlog[i].cmd.param,
1319 priv->cmdlog[i].cmd.len);
1320 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1321 }
1322 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1323 return len;
1324 }
1325
1326 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1327
1328 #ifdef CONFIG_IPW2200_PROMISCUOUS
1329 static void ipw_prom_free(struct ipw_priv *priv);
1330 static int ipw_prom_alloc(struct ipw_priv *priv);
1331 static ssize_t store_rtap_iface(struct device *d,
1332 struct device_attribute *attr,
1333 const char *buf, size_t count)
1334 {
1335 struct ipw_priv *priv = dev_get_drvdata(d);
1336 int rc = 0;
1337
1338 if (count < 1)
1339 return -EINVAL;
1340
1341 switch (buf[0]) {
1342 case '0':
1343 if (!rtap_iface)
1344 return count;
1345
1346 if (netif_running(priv->prom_net_dev)) {
1347 IPW_WARNING("Interface is up. Cannot unregister.\n");
1348 return count;
1349 }
1350
1351 ipw_prom_free(priv);
1352 rtap_iface = 0;
1353 break;
1354
1355 case '1':
1356 if (rtap_iface)
1357 return count;
1358
1359 rc = ipw_prom_alloc(priv);
1360 if (!rc)
1361 rtap_iface = 1;
1362 break;
1363
1364 default:
1365 return -EINVAL;
1366 }
1367
1368 if (rc) {
1369 IPW_ERROR("Failed to register promiscuous network "
1370 "device (error %d).\n", rc);
1371 }
1372
1373 return count;
1374 }
1375
1376 static ssize_t show_rtap_iface(struct device *d,
1377 struct device_attribute *attr,
1378 char *buf)
1379 {
1380 struct ipw_priv *priv = dev_get_drvdata(d);
1381 if (rtap_iface)
1382 return sprintf(buf, "%s", priv->prom_net_dev->name);
1383 else {
1384 buf[0] = '-';
1385 buf[1] = '1';
1386 buf[2] = '\0';
1387 return 3;
1388 }
1389 }
1390
1391 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1392 store_rtap_iface);
1393
1394 static ssize_t store_rtap_filter(struct device *d,
1395 struct device_attribute *attr,
1396 const char *buf, size_t count)
1397 {
1398 struct ipw_priv *priv = dev_get_drvdata(d);
1399
1400 if (!priv->prom_priv) {
1401 IPW_ERROR("Attempting to set filter without "
1402 "rtap_iface enabled.\n");
1403 return -EPERM;
1404 }
1405
1406 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1407
1408 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1409 BIT_ARG16(priv->prom_priv->filter));
1410
1411 return count;
1412 }
1413
1414 static ssize_t show_rtap_filter(struct device *d,
1415 struct device_attribute *attr,
1416 char *buf)
1417 {
1418 struct ipw_priv *priv = dev_get_drvdata(d);
1419 return sprintf(buf, "0x%04X",
1420 priv->prom_priv ? priv->prom_priv->filter : 0);
1421 }
1422
1423 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1424 store_rtap_filter);
1425 #endif
1426
1427 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1428 char *buf)
1429 {
1430 struct ipw_priv *priv = dev_get_drvdata(d);
1431 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1432 }
1433
1434 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1435 const char *buf, size_t count)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 #ifdef CONFIG_IPW2200_DEBUG
1439 struct net_device *dev = priv->net_dev;
1440 #endif
1441 char buffer[] = "00000000";
1442 unsigned long len =
1443 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1444 unsigned long val;
1445 char *p = buffer;
1446
1447 IPW_DEBUG_INFO("enter\n");
1448
1449 strncpy(buffer, buf, len);
1450 buffer[len] = 0;
1451
1452 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1453 p++;
1454 if (p[0] == 'x' || p[0] == 'X')
1455 p++;
1456 val = simple_strtoul(p, &p, 16);
1457 } else
1458 val = simple_strtoul(p, &p, 10);
1459 if (p == buffer) {
1460 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1461 } else {
1462 priv->ieee->scan_age = val;
1463 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1464 }
1465
1466 IPW_DEBUG_INFO("exit\n");
1467 return len;
1468 }
1469
1470 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1471
1472 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1477 }
1478
1479 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1480 const char *buf, size_t count)
1481 {
1482 struct ipw_priv *priv = dev_get_drvdata(d);
1483
1484 IPW_DEBUG_INFO("enter\n");
1485
1486 if (count == 0)
1487 return 0;
1488
1489 if (*buf == 0) {
1490 IPW_DEBUG_LED("Disabling LED control.\n");
1491 priv->config |= CFG_NO_LED;
1492 ipw_led_shutdown(priv);
1493 } else {
1494 IPW_DEBUG_LED("Enabling LED control.\n");
1495 priv->config &= ~CFG_NO_LED;
1496 ipw_led_init(priv);
1497 }
1498
1499 IPW_DEBUG_INFO("exit\n");
1500 return count;
1501 }
1502
1503 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1504
1505 static ssize_t show_status(struct device *d,
1506 struct device_attribute *attr, char *buf)
1507 {
1508 struct ipw_priv *p = d->driver_data;
1509 return sprintf(buf, "0x%08x\n", (int)p->status);
1510 }
1511
1512 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1513
1514 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1515 char *buf)
1516 {
1517 struct ipw_priv *p = d->driver_data;
1518 return sprintf(buf, "0x%08x\n", (int)p->config);
1519 }
1520
1521 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1522
1523 static ssize_t show_nic_type(struct device *d,
1524 struct device_attribute *attr, char *buf)
1525 {
1526 struct ipw_priv *priv = d->driver_data;
1527 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1528 }
1529
1530 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1531
1532 static ssize_t show_ucode_version(struct device *d,
1533 struct device_attribute *attr, char *buf)
1534 {
1535 u32 len = sizeof(u32), tmp = 0;
1536 struct ipw_priv *p = d->driver_data;
1537
1538 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1539 return 0;
1540
1541 return sprintf(buf, "0x%08x\n", tmp);
1542 }
1543
1544 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1545
1546 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1547 char *buf)
1548 {
1549 u32 len = sizeof(u32), tmp = 0;
1550 struct ipw_priv *p = d->driver_data;
1551
1552 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1553 return 0;
1554
1555 return sprintf(buf, "0x%08x\n", tmp);
1556 }
1557
1558 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1559
1560 /*
1561 * Add a device attribute to view/control the delay between eeprom
1562 * operations.
1563 */
1564 static ssize_t show_eeprom_delay(struct device *d,
1565 struct device_attribute *attr, char *buf)
1566 {
1567 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1568 return sprintf(buf, "%i\n", n);
1569 }
1570 static ssize_t store_eeprom_delay(struct device *d,
1571 struct device_attribute *attr,
1572 const char *buf, size_t count)
1573 {
1574 struct ipw_priv *p = d->driver_data;
1575 sscanf(buf, "%i", &p->eeprom_delay);
1576 return strnlen(buf, count);
1577 }
1578
1579 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1580 show_eeprom_delay, store_eeprom_delay);
1581
1582 static ssize_t show_command_event_reg(struct device *d,
1583 struct device_attribute *attr, char *buf)
1584 {
1585 u32 reg = 0;
1586 struct ipw_priv *p = d->driver_data;
1587
1588 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1589 return sprintf(buf, "0x%08x\n", reg);
1590 }
1591 static ssize_t store_command_event_reg(struct device *d,
1592 struct device_attribute *attr,
1593 const char *buf, size_t count)
1594 {
1595 u32 reg;
1596 struct ipw_priv *p = d->driver_data;
1597
1598 sscanf(buf, "%x", &reg);
1599 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1600 return strnlen(buf, count);
1601 }
1602
1603 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1604 show_command_event_reg, store_command_event_reg);
1605
1606 static ssize_t show_mem_gpio_reg(struct device *d,
1607 struct device_attribute *attr, char *buf)
1608 {
1609 u32 reg = 0;
1610 struct ipw_priv *p = d->driver_data;
1611
1612 reg = ipw_read_reg32(p, 0x301100);
1613 return sprintf(buf, "0x%08x\n", reg);
1614 }
1615 static ssize_t store_mem_gpio_reg(struct device *d,
1616 struct device_attribute *attr,
1617 const char *buf, size_t count)
1618 {
1619 u32 reg;
1620 struct ipw_priv *p = d->driver_data;
1621
1622 sscanf(buf, "%x", &reg);
1623 ipw_write_reg32(p, 0x301100, reg);
1624 return strnlen(buf, count);
1625 }
1626
1627 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1628 show_mem_gpio_reg, store_mem_gpio_reg);
1629
1630 static ssize_t show_indirect_dword(struct device *d,
1631 struct device_attribute *attr, char *buf)
1632 {
1633 u32 reg = 0;
1634 struct ipw_priv *priv = d->driver_data;
1635
1636 if (priv->status & STATUS_INDIRECT_DWORD)
1637 reg = ipw_read_reg32(priv, priv->indirect_dword);
1638 else
1639 reg = 0;
1640
1641 return sprintf(buf, "0x%08x\n", reg);
1642 }
1643 static ssize_t store_indirect_dword(struct device *d,
1644 struct device_attribute *attr,
1645 const char *buf, size_t count)
1646 {
1647 struct ipw_priv *priv = d->driver_data;
1648
1649 sscanf(buf, "%x", &priv->indirect_dword);
1650 priv->status |= STATUS_INDIRECT_DWORD;
1651 return strnlen(buf, count);
1652 }
1653
1654 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1655 show_indirect_dword, store_indirect_dword);
1656
1657 static ssize_t show_indirect_byte(struct device *d,
1658 struct device_attribute *attr, char *buf)
1659 {
1660 u8 reg = 0;
1661 struct ipw_priv *priv = d->driver_data;
1662
1663 if (priv->status & STATUS_INDIRECT_BYTE)
1664 reg = ipw_read_reg8(priv, priv->indirect_byte);
1665 else
1666 reg = 0;
1667
1668 return sprintf(buf, "0x%02x\n", reg);
1669 }
1670 static ssize_t store_indirect_byte(struct device *d,
1671 struct device_attribute *attr,
1672 const char *buf, size_t count)
1673 {
1674 struct ipw_priv *priv = d->driver_data;
1675
1676 sscanf(buf, "%x", &priv->indirect_byte);
1677 priv->status |= STATUS_INDIRECT_BYTE;
1678 return strnlen(buf, count);
1679 }
1680
1681 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1682 show_indirect_byte, store_indirect_byte);
1683
1684 static ssize_t show_direct_dword(struct device *d,
1685 struct device_attribute *attr, char *buf)
1686 {
1687 u32 reg = 0;
1688 struct ipw_priv *priv = d->driver_data;
1689
1690 if (priv->status & STATUS_DIRECT_DWORD)
1691 reg = ipw_read32(priv, priv->direct_dword);
1692 else
1693 reg = 0;
1694
1695 return sprintf(buf, "0x%08x\n", reg);
1696 }
1697 static ssize_t store_direct_dword(struct device *d,
1698 struct device_attribute *attr,
1699 const char *buf, size_t count)
1700 {
1701 struct ipw_priv *priv = d->driver_data;
1702
1703 sscanf(buf, "%x", &priv->direct_dword);
1704 priv->status |= STATUS_DIRECT_DWORD;
1705 return strnlen(buf, count);
1706 }
1707
1708 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1709 show_direct_dword, store_direct_dword);
1710
1711 static int rf_kill_active(struct ipw_priv *priv)
1712 {
1713 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1714 priv->status |= STATUS_RF_KILL_HW;
1715 else
1716 priv->status &= ~STATUS_RF_KILL_HW;
1717
1718 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1719 }
1720
1721 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1722 char *buf)
1723 {
1724 /* 0 - RF kill not enabled
1725 1 - SW based RF kill active (sysfs)
1726 2 - HW based RF kill active
1727 3 - Both HW and SW based RF kill active */
1728 struct ipw_priv *priv = d->driver_data;
1729 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1730 (rf_kill_active(priv) ? 0x2 : 0x0);
1731 return sprintf(buf, "%i\n", val);
1732 }
1733
1734 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1735 {
1736 if ((disable_radio ? 1 : 0) ==
1737 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1738 return 0;
1739
1740 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1741 disable_radio ? "OFF" : "ON");
1742
1743 if (disable_radio) {
1744 priv->status |= STATUS_RF_KILL_SW;
1745
1746 if (priv->workqueue)
1747 cancel_delayed_work(&priv->request_scan);
1748 queue_work(priv->workqueue, &priv->down);
1749 } else {
1750 priv->status &= ~STATUS_RF_KILL_SW;
1751 if (rf_kill_active(priv)) {
1752 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1753 "disabled by HW switch\n");
1754 /* Make sure the RF_KILL check timer is running */
1755 cancel_delayed_work(&priv->rf_kill);
1756 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1757 2 * HZ);
1758 } else
1759 queue_work(priv->workqueue, &priv->up);
1760 }
1761
1762 return 1;
1763 }
1764
1765 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1766 const char *buf, size_t count)
1767 {
1768 struct ipw_priv *priv = d->driver_data;
1769
1770 ipw_radio_kill_sw(priv, buf[0] == '1');
1771
1772 return count;
1773 }
1774
1775 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
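
/*
 * Usage note (illustrative, not from the original source): writing '1' to
 * the rf_kill attribute asserts the software RF kill via
 * ipw_radio_kill_sw() above; writing '0' clears it.  Reading the attribute
 * returns the combined state documented in show_rf_kill(): bit 0 is the
 * SW kill, bit 1 the HW kill switch.
 */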
1776
1777 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1778 char *buf)
1779 {
1780 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1781 int pos = 0, len = 0;
1782 if (priv->config & CFG_SPEED_SCAN) {
1783 while (priv->speed_scan[pos] != 0)
1784 len += sprintf(&buf[len], "%d ",
1785 priv->speed_scan[pos++]);
1786 return len + sprintf(&buf[len], "\n");
1787 }
1788
1789 return sprintf(buf, "0\n");
1790 }
1791
1792 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1793 const char *buf, size_t count)
1794 {
1795 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1796 int channel, pos = 0;
1797 const char *p = buf;
1798
1799 /* list of space separated channels to scan, optionally ending with 0 */
1800 while ((channel = simple_strtol(p, NULL, 0))) {
1801 if (pos == MAX_SPEED_SCAN - 1) {
1802 priv->speed_scan[pos] = 0;
1803 break;
1804 }
1805
1806 if (ieee80211_is_valid_channel(priv->ieee, channel))
1807 priv->speed_scan[pos++] = channel;
1808 else
1809 IPW_WARNING("Skipping invalid channel request: %d\n",
1810 channel);
1811 p = strchr(p, ' ');
1812 if (!p)
1813 break;
1814 while (*p == ' ' || *p == '\t')
1815 p++;
1816 }
1817
1818 if (pos == 0)
1819 priv->config &= ~CFG_SPEED_SCAN;
1820 else {
1821 priv->speed_scan_pos = 0;
1822 priv->config |= CFG_SPEED_SCAN;
1823 }
1824
1825 return count;
1826 }
1827
1828 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1829 store_speed_scan);
1830
1831 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1832 char *buf)
1833 {
1834 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1835 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1836 }
1837
1838 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1839 const char *buf, size_t count)
1840 {
1841 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1842 if (buf[0] == '1')
1843 priv->config |= CFG_NET_STATS;
1844 else
1845 priv->config &= ~CFG_NET_STATS;
1846
1847 return count;
1848 }
1849
1850 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1851 show_net_stats, store_net_stats);
1852
1853 static void notify_wx_assoc_event(struct ipw_priv *priv)
1854 {
1855 union iwreq_data wrqu;
1856 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1857 if (priv->status & STATUS_ASSOCIATED)
1858 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1859 else
1860 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1861 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1862 }
1863
1864 static void ipw_irq_tasklet(struct ipw_priv *priv)
1865 {
1866 u32 inta, inta_mask, handled = 0;
1867 unsigned long flags;
1868 int rc = 0;
1869
1870 spin_lock_irqsave(&priv->irq_lock, flags);
1871
1872 inta = ipw_read32(priv, IPW_INTA_RW);
1873 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1874 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1875
1876 /* Add any cached INTA values that need to be handled */
1877 inta |= priv->isr_inta;
1878
1879 spin_unlock_irqrestore(&priv->irq_lock, flags);
1880
1881 spin_lock_irqsave(&priv->lock, flags);
1882
1883 /* handle all the justifications for the interrupt */
1884 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1885 ipw_rx(priv);
1886 handled |= IPW_INTA_BIT_RX_TRANSFER;
1887 }
1888
1889 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1890 IPW_DEBUG_HC("Command completed.\n");
1891 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1892 priv->status &= ~STATUS_HCMD_ACTIVE;
1893 wake_up_interruptible(&priv->wait_command_queue);
1894 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1895 }
1896
1897 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1898 IPW_DEBUG_TX("TX_QUEUE_1\n");
1899 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1900 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1901 }
1902
1903 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1904 IPW_DEBUG_TX("TX_QUEUE_2\n");
1905 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1906 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1907 }
1908
1909 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1910 IPW_DEBUG_TX("TX_QUEUE_3\n");
1911 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1912 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1913 }
1914
1915 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1916 IPW_DEBUG_TX("TX_QUEUE_4\n");
1917 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1918 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1919 }
1920
1921 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1922 IPW_WARNING("STATUS_CHANGE\n");
1923 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1924 }
1925
1926 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1927 IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
1928 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1929 }
1930
1931 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1932 IPW_WARNING("HOST_CMD_DONE\n");
1933 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1934 }
1935
1936 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1937 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1938 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1939 }
1940
1941 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1942 IPW_WARNING("PHY_OFF_DONE\n");
1943 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1944 }
1945
1946 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1947 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1948 priv->status |= STATUS_RF_KILL_HW;
1949 wake_up_interruptible(&priv->wait_command_queue);
1950 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1951 cancel_delayed_work(&priv->request_scan);
1952 schedule_work(&priv->link_down);
1953 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1954 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1955 }
1956
1957 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1958 IPW_WARNING("Firmware error detected. Restarting.\n");
1959 if (priv->error) {
1960 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1961 #ifdef CONFIG_IPW2200_DEBUG
1962 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1963 struct ipw_fw_error *error =
1964 ipw_alloc_error_log(priv);
1965 ipw_dump_error_log(priv, error);
1966 kfree(error);
1967 }
1968 #endif
1969 } else {
1970 priv->error = ipw_alloc_error_log(priv);
1971 if (priv->error)
1972 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1973 else
1974 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1975 "log.\n");
1976 #ifdef CONFIG_IPW2200_DEBUG
1977 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1978 ipw_dump_error_log(priv, priv->error);
1979 #endif
1980 }
1981
1982 /* XXX: If hardware encryption is for WPA/WPA2,
1983 * we have to notify the supplicant. */
1984 if (priv->ieee->sec.encrypt) {
1985 priv->status &= ~STATUS_ASSOCIATED;
1986 notify_wx_assoc_event(priv);
1987 }
1988
1989 /* Keep the restart process from trying to send host
1990 * commands by clearing the INIT status bit */
1991 priv->status &= ~STATUS_INIT;
1992
1993 /* Cancel currently queued command. */
1994 priv->status &= ~STATUS_HCMD_ACTIVE;
1995 wake_up_interruptible(&priv->wait_command_queue);
1996
1997 queue_work(priv->workqueue, &priv->adapter_restart);
1998 handled |= IPW_INTA_BIT_FATAL_ERROR;
1999 }
2000
2001 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2002 IPW_ERROR("Parity error\n");
2003 handled |= IPW_INTA_BIT_PARITY_ERROR;
2004 }
2005
2006 if (handled != inta) {
2007 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2008 }
2009
2010 spin_unlock_irqrestore(&priv->lock, flags);
2011
2012 /* enable all interrupts */
2013 ipw_enable_interrupts(priv);
2014 }
2015
2016 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
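/*
 * For reference, IPW_CMD(SSID) in the switch below expands to
 * "case IPW_CMD_SSID : return "SSID"", i.e. the macro stringifies each
 * command name for debug output.
 */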
2017 static char *get_cmd_string(u8 cmd)
2018 {
2019 switch (cmd) {
2020 IPW_CMD(HOST_COMPLETE);
2021 IPW_CMD(POWER_DOWN);
2022 IPW_CMD(SYSTEM_CONFIG);
2023 IPW_CMD(MULTICAST_ADDRESS);
2024 IPW_CMD(SSID);
2025 IPW_CMD(ADAPTER_ADDRESS);
2026 IPW_CMD(PORT_TYPE);
2027 IPW_CMD(RTS_THRESHOLD);
2028 IPW_CMD(FRAG_THRESHOLD);
2029 IPW_CMD(POWER_MODE);
2030 IPW_CMD(WEP_KEY);
2031 IPW_CMD(TGI_TX_KEY);
2032 IPW_CMD(SCAN_REQUEST);
2033 IPW_CMD(SCAN_REQUEST_EXT);
2034 IPW_CMD(ASSOCIATE);
2035 IPW_CMD(SUPPORTED_RATES);
2036 IPW_CMD(SCAN_ABORT);
2037 IPW_CMD(TX_FLUSH);
2038 IPW_CMD(QOS_PARAMETERS);
2039 IPW_CMD(DINO_CONFIG);
2040 IPW_CMD(RSN_CAPABILITIES);
2041 IPW_CMD(RX_KEY);
2042 IPW_CMD(CARD_DISABLE);
2043 IPW_CMD(SEED_NUMBER);
2044 IPW_CMD(TX_POWER);
2045 IPW_CMD(COUNTRY_INFO);
2046 IPW_CMD(AIRONET_INFO);
2047 IPW_CMD(AP_TX_POWER);
2048 IPW_CMD(CCKM_INFO);
2049 IPW_CMD(CCX_VER_INFO);
2050 IPW_CMD(SET_CALIBRATION);
2051 IPW_CMD(SENSITIVITY_CALIB);
2052 IPW_CMD(RETRY_LIMIT);
2053 IPW_CMD(IPW_PRE_POWER_DOWN);
2054 IPW_CMD(VAP_BEACON_TEMPLATE);
2055 IPW_CMD(VAP_DTIM_PERIOD);
2056 IPW_CMD(EXT_SUPPORTED_RATES);
2057 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2058 IPW_CMD(VAP_QUIET_INTERVALS);
2059 IPW_CMD(VAP_CHANNEL_SWITCH);
2060 IPW_CMD(VAP_MANDATORY_CHANNELS);
2061 IPW_CMD(VAP_CELL_PWR_LIMIT);
2062 IPW_CMD(VAP_CF_PARAM_SET);
2063 IPW_CMD(VAP_SET_BEACONING_STATE);
2064 IPW_CMD(MEASUREMENT);
2065 IPW_CMD(POWER_CAPABILITY);
2066 IPW_CMD(SUPPORTED_CHANNELS);
2067 IPW_CMD(TPC_REPORT);
2068 IPW_CMD(WME_INFO);
2069 IPW_CMD(PRODUCTION_COMMAND);
2070 default:
2071 return "UNKNOWN";
2072 }
2073 }
2074
2075 #define HOST_COMPLETE_TIMEOUT HZ
2076
2077 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2078 {
2079 int rc = 0;
2080 unsigned long flags;
2081
2082 spin_lock_irqsave(&priv->lock, flags);
2083 if (priv->status & STATUS_HCMD_ACTIVE) {
2084 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2085 get_cmd_string(cmd->cmd));
2086 spin_unlock_irqrestore(&priv->lock, flags);
2087 return -EAGAIN;
2088 }
2089
2090 priv->status |= STATUS_HCMD_ACTIVE;
2091
2092 if (priv->cmdlog) {
2093 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2094 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2095 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2096 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2097 cmd->len);
2098 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2099 }
2100
2101 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2102 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2103 priv->status);
2104
2105 #ifndef DEBUG_CMD_WEP_KEY
2106 if (cmd->cmd == IPW_CMD_WEP_KEY)
2107 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2108 else
2109 #endif
2110 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2111
2112 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2113 if (rc) {
2114 priv->status &= ~STATUS_HCMD_ACTIVE;
2115 IPW_ERROR("Failed to send %s: Reason %d\n",
2116 get_cmd_string(cmd->cmd), rc);
2117 spin_unlock_irqrestore(&priv->lock, flags);
2118 goto exit;
2119 }
2120 spin_unlock_irqrestore(&priv->lock, flags);
2121
2122 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2123 !(priv->
2124 status & STATUS_HCMD_ACTIVE),
2125 HOST_COMPLETE_TIMEOUT);
2126 if (rc == 0) {
2127 spin_lock_irqsave(&priv->lock, flags);
2128 if (priv->status & STATUS_HCMD_ACTIVE) {
2129 IPW_ERROR("Failed to send %s: Command timed out.\n",
2130 get_cmd_string(cmd->cmd));
2131 priv->status &= ~STATUS_HCMD_ACTIVE;
2132 spin_unlock_irqrestore(&priv->lock, flags);
2133 rc = -EIO;
2134 goto exit;
2135 }
2136 spin_unlock_irqrestore(&priv->lock, flags);
2137 } else
2138 rc = 0;
2139
2140 if (priv->status & STATUS_RF_KILL_HW) {
2141 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2142 get_cmd_string(cmd->cmd));
2143 rc = -EIO;
2144 goto exit;
2145 }
2146
2147 exit:
2148 if (priv->cmdlog) {
2149 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2150 priv->cmdlog_pos %= priv->cmdlog_len;
2151 }
2152 return rc;
2153 }
2154
2155 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2156 {
2157 struct host_cmd cmd = {
2158 .cmd = command,
2159 };
2160
2161 return __ipw_send_cmd(priv, &cmd);
2162 }
2163
2164 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2165 void *data)
2166 {
2167 struct host_cmd cmd = {
2168 .cmd = command,
2169 .len = len,
2170 .param = data,
2171 };
2172
2173 return __ipw_send_cmd(priv, &cmd);
2174 }
2175
2176 static int ipw_send_host_complete(struct ipw_priv *priv)
2177 {
2178 if (!priv) {
2179 IPW_ERROR("Invalid args\n");
2180 return -1;
2181 }
2182
2183 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2184 }
2185
2186 static int ipw_send_system_config(struct ipw_priv *priv)
2187 {
2188 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2189 sizeof(priv->sys_config),
2190 &priv->sys_config);
2191 }
2192
2193 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2194 {
2195 if (!priv || !ssid) {
2196 IPW_ERROR("Invalid args\n");
2197 return -1;
2198 }
2199
2200 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2201 ssid);
2202 }
2203
2204 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2205 {
2206 if (!priv || !mac) {
2207 IPW_ERROR("Invalid args\n");
2208 return -1;
2209 }
2210
2211 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2212 priv->net_dev->name, MAC_ARG(mac));
2213
2214 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2215 }
2216
2217 /*
2218 * NOTE: This must be executed from our workqueue as it results in udelay
2219 * being called, which may corrupt the keyboard if executed on the default
2220 * workqueue
2221 */
2222 static void ipw_adapter_restart(void *adapter)
2223 {
2224 struct ipw_priv *priv = adapter;
2225
2226 if (priv->status & STATUS_RF_KILL_MASK)
2227 return;
2228
2229 ipw_down(priv);
2230
2231 if (priv->assoc_network &&
2232 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2233 ipw_remove_current_network(priv);
2234
2235 if (ipw_up(priv)) {
2236 IPW_ERROR("Failed to up device\n");
2237 return;
2238 }
2239 }
2240
2241 static void ipw_bg_adapter_restart(void *data)
2242 {
2243 struct ipw_priv *priv = data;
2244 mutex_lock(&priv->mutex);
2245 ipw_adapter_restart(data);
2246 mutex_unlock(&priv->mutex);
2247 }
2248
2249 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2250
2251 static void ipw_scan_check(void *data)
2252 {
2253 struct ipw_priv *priv = data;
2254 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2255 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2256 "adapter after (%dms).\n",
2257 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2258 queue_work(priv->workqueue, &priv->adapter_restart);
2259 }
2260 }
2261
2262 static void ipw_bg_scan_check(void *data)
2263 {
2264 struct ipw_priv *priv = data;
2265 mutex_lock(&priv->mutex);
2266 ipw_scan_check(data);
2267 mutex_unlock(&priv->mutex);
2268 }
2269
2270 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2271 struct ipw_scan_request_ext *request)
2272 {
2273 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2274 sizeof(*request), request);
2275 }
2276
2277 static int ipw_send_scan_abort(struct ipw_priv *priv)
2278 {
2279 if (!priv) {
2280 IPW_ERROR("Invalid args\n");
2281 return -1;
2282 }
2283
2284 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2285 }
2286
2287 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2288 {
2289 struct ipw_sensitivity_calib calib = {
2290 .beacon_rssi_raw = sens,
2291 };
2292
2293 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2294 &calib);
2295 }
2296
2297 static int ipw_send_associate(struct ipw_priv *priv,
2298 struct ipw_associate *associate)
2299 {
2300 struct ipw_associate tmp_associate;
2301
2302 if (!priv || !associate) {
2303 IPW_ERROR("Invalid args\n");
2304 return -1;
2305 }
2306
2307 memcpy(&tmp_associate, associate, sizeof(*associate));
2308 tmp_associate.policy_support =
2309 cpu_to_le16(tmp_associate.policy_support);
2310 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2311 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2312 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2313 tmp_associate.listen_interval =
2314 cpu_to_le16(tmp_associate.listen_interval);
2315 tmp_associate.beacon_interval =
2316 cpu_to_le16(tmp_associate.beacon_interval);
2317 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2318
2319 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2320 &tmp_associate);
2321 }
2322
2323 static int ipw_send_supported_rates(struct ipw_priv *priv,
2324 struct ipw_supported_rates *rates)
2325 {
2326 if (!priv || !rates) {
2327 IPW_ERROR("Invalid args\n");
2328 return -1;
2329 }
2330
2331 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2332 rates);
2333 }
2334
2335 static int ipw_set_random_seed(struct ipw_priv *priv)
2336 {
2337 u32 val;
2338
2339 if (!priv) {
2340 IPW_ERROR("Invalid args\n");
2341 return -1;
2342 }
2343
2344 get_random_bytes(&val, sizeof(val));
2345
2346 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2347 }
2348
2349 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2350 {
2351 if (!priv) {
2352 IPW_ERROR("Invalid args\n");
2353 return -1;
2354 }
2355
2356 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2357 &phy_off);
2358 }
2359
2360 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2361 {
2362 if (!priv || !power) {
2363 IPW_ERROR("Invalid args\n");
2364 return -1;
2365 }
2366
2367 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2368 }
2369
2370 static int ipw_set_tx_power(struct ipw_priv *priv)
2371 {
2372 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2373 struct ipw_tx_power tx_power;
2374 s8 max_power;
2375 int i;
2376
2377 memset(&tx_power, 0, sizeof(tx_power));
2378
2379 /* configure device for 'G' band */
2380 tx_power.ieee_mode = IPW_G_MODE;
2381 tx_power.num_channels = geo->bg_channels;
2382 for (i = 0; i < geo->bg_channels; i++) {
2383 max_power = geo->bg[i].max_power;
2384 tx_power.channels_tx_power[i].channel_number =
2385 geo->bg[i].channel;
2386 tx_power.channels_tx_power[i].tx_power = max_power ?
2387 min(max_power, priv->tx_power) : priv->tx_power;
2388 }
2389 if (ipw_send_tx_power(priv, &tx_power))
2390 return -EIO;
2391
2392 /* configure device to also handle 'B' band */
2393 tx_power.ieee_mode = IPW_B_MODE;
2394 if (ipw_send_tx_power(priv, &tx_power))
2395 return -EIO;
2396
2397 /* configure device to also handle 'A' band */
2398 if (priv->ieee->abg_true) {
2399 tx_power.ieee_mode = IPW_A_MODE;
2400 tx_power.num_channels = geo->a_channels;
2401 for (i = 0; i < tx_power.num_channels; i++) {
2402 max_power = geo->a[i].max_power;
2403 tx_power.channels_tx_power[i].channel_number =
2404 geo->a[i].channel;
2405 tx_power.channels_tx_power[i].tx_power = max_power ?
2406 min(max_power, priv->tx_power) : priv->tx_power;
2407 }
2408 if (ipw_send_tx_power(priv, &tx_power))
2409 return -EIO;
2410 }
2411 return 0;
2412 }
2413
2414 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2415 {
2416 struct ipw_rts_threshold rts_threshold = {
2417 .rts_threshold = rts,
2418 };
2419
2420 if (!priv) {
2421 IPW_ERROR("Invalid args\n");
2422 return -1;
2423 }
2424
2425 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2426 sizeof(rts_threshold), &rts_threshold);
2427 }
2428
2429 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2430 {
2431 struct ipw_frag_threshold frag_threshold = {
2432 .frag_threshold = frag,
2433 };
2434
2435 if (!priv) {
2436 IPW_ERROR("Invalid args\n");
2437 return -1;
2438 }
2439
2440 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2441 sizeof(frag_threshold), &frag_threshold);
2442 }
2443
2444 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2445 {
2446 u32 param;
2447
2448 if (!priv) {
2449 IPW_ERROR("Invalid args\n");
2450 return -1;
2451 }
2452
2453 /* If on battery, use power index 3; if on AC, use CAM; otherwise use the
2454 * user-requested level */
2455 switch (mode) {
2456 case IPW_POWER_BATTERY:
2457 param = IPW_POWER_INDEX_3;
2458 break;
2459 case IPW_POWER_AC:
2460 param = IPW_POWER_MODE_CAM;
2461 break;
2462 default:
2463 param = mode;
2464 break;
2465 }
2466
2467 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2468 &param);
2469 }
2470
2471 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2472 {
2473 struct ipw_retry_limit retry_limit = {
2474 .short_retry_limit = slimit,
2475 .long_retry_limit = llimit
2476 };
2477
2478 if (!priv) {
2479 IPW_ERROR("Invalid args\n");
2480 return -1;
2481 }
2482
2483 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2484 &retry_limit);
2485 }
2486
2487 /*
2488 * The IPW device contains a Microwire compatible EEPROM that stores
2489 * various data like the MAC address. Usually the firmware has exclusive
2490 * access to the eeprom, but during device initialization (before the
2491 * device driver has sent the HostComplete command to the firmware) the
2492 * device driver has read access to the EEPROM by way of indirect addressing
2493 * through a couple of memory mapped registers.
2494 *
2495 * The following is a simplified implementation for pulling data out of
2496 * the eeprom, along with some helper functions to find information in
2497 * the per device private data's copy of the eeprom.
2498 *
2499 * NOTE: To better understand how these functions work (i.e. what is a chip
2500 * select and why we have to keep driving the eeprom clock?), read
2501 * just about any data sheet for a Microwire compatible EEPROM.
2502 */
2503
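/*
 * Illustrative usage of the helpers below (a sketch, not how the driver
 * itself is structured): eeprom_read_u16() clocks one 16-bit word straight
 * off the part (only valid before the HostComplete command is sent), while
 * eeprom_parse_mac() reads from the cached copy that ipw_eeprom_init_sram()
 * fills in:
 *
 *	u16 word0 = eeprom_read_u16(priv, 0);
 *	u8 mac[ETH_ALEN];
 *	eeprom_parse_mac(priv, mac);
 */
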
2504 /* write a 32 bit value into the indirect accessor register */
2505 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2506 {
2507 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2508
2509 /* the eeprom requires some time to complete the operation */
2510 udelay(p->eeprom_delay);
2511
2512 return;
2513 }
2514
2515 /* perform a chip select operation */
2516 static void eeprom_cs(struct ipw_priv *priv)
2517 {
2518 eeprom_write_reg(priv, 0);
2519 eeprom_write_reg(priv, EEPROM_BIT_CS);
2520 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2521 eeprom_write_reg(priv, EEPROM_BIT_CS);
2522 }
2523
2524 /* release the chip select */
2525 static void eeprom_disable_cs(struct ipw_priv *priv)
2526 {
2527 eeprom_write_reg(priv, EEPROM_BIT_CS);
2528 eeprom_write_reg(priv, 0);
2529 eeprom_write_reg(priv, EEPROM_BIT_SK);
2530 }
2531
2532 /* push a single bit down to the eeprom */
2533 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2534 {
2535 int d = (bit ? EEPROM_BIT_DI : 0);
2536 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2537 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2538 }
2539
2540 /* push an opcode followed by an address down to the eeprom */
2541 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2542 {
2543 int i;
2544
2545 eeprom_cs(priv);
2546 eeprom_write_bit(priv, 1);
2547 eeprom_write_bit(priv, op & 2);
2548 eeprom_write_bit(priv, op & 1);
2549 for (i = 7; i >= 0; i--) {
2550 eeprom_write_bit(priv, addr & (1 << i));
2551 }
2552 }
2553
2554 /* pull 16 bits off the eeprom, one bit at a time */
2555 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2556 {
2557 int i;
2558 u16 r = 0;
2559
2560 /* Send READ Opcode */
2561 eeprom_op(priv, EEPROM_CMD_READ, addr);
2562
2563 /* Send dummy bit */
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565
2566 /* Read the byte off the eeprom one bit at a time */
2567 for (i = 0; i < 16; i++) {
2568 u32 data = 0;
2569 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2570 eeprom_write_reg(priv, EEPROM_BIT_CS);
2571 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2572 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2573 }
2574
2575 /* Send another dummy bit */
2576 eeprom_write_reg(priv, 0);
2577 eeprom_disable_cs(priv);
2578
2579 return r;
2580 }
2581
2582 /* helper function for pulling the mac address out of the private */
2583 /* data's copy of the eeprom data */
2584 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2585 {
2586 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2587 }
2588
2589 /*
2590 * Either the device driver (i.e. the host) or the firmware can
2591 * load eeprom data into the designated region in SRAM. If neither
2592 * happens, then the FW will shut down with a fatal error.
2593 *
2594 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2595 * region of shared SRAM needs to be non-zero.
2596 */
2597 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2598 {
2599 int i;
2600 u16 *eeprom = (u16 *) priv->eeprom;
2601
2602 IPW_DEBUG_TRACE(">>\n");
2603
2604 /* read entire contents of eeprom into private buffer */
2605 for (i = 0; i < 128; i++)
2606 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2607
2608 /*
2609 If the data looks correct, then copy it to our private
2610 copy. Otherwise let the firmware know to perform the operation
2611 on its own.
2612 */
2613 if (priv->eeprom[EEPROM_VERSION] != 0) {
2614 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2615
2616 /* write the eeprom data to sram */
2617 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2618 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2619
2620 /* Do not load eeprom data on fatal error or suspend */
2621 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2622 } else {
2623 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2624
2625 /* Load eeprom data on fatal error or suspend */
2626 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2627 }
2628
2629 IPW_DEBUG_TRACE("<<\n");
2630 }
2631
2632 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2633 {
2634 count >>= 2;
2635 if (!count)
2636 return;
2637 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2638 while (count--)
2639 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2640 }
2641
2642 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2643 {
2644 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2645 CB_NUMBER_OF_ELEMENTS_SMALL *
2646 sizeof(struct command_block));
2647 }
2648
2649 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2650 { /* start dma engine but no transfers yet */
2651
2652 IPW_DEBUG_FW(">> : \n");
2653
2654 /* Start the dma */
2655 ipw_fw_dma_reset_command_blocks(priv);
2656
2657 /* Write CB base address */
2658 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2659
2660 IPW_DEBUG_FW("<< : \n");
2661 return 0;
2662 }
2663
2664 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2665 {
2666 u32 control = 0;
2667
2668 IPW_DEBUG_FW(">> :\n");
2669
2670 /* set the Stop and Abort bit */
2671 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2672 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2673 priv->sram_desc.last_cb_index = 0;
2674
2675 IPW_DEBUG_FW("<< \n");
2676 }
2677
2678 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2679 struct command_block *cb)
2680 {
2681 u32 address =
2682 IPW_SHARED_SRAM_DMA_CONTROL +
2683 (sizeof(struct command_block) * index);
2684 IPW_DEBUG_FW(">> :\n");
2685
2686 ipw_write_indirect(priv, address, (u8 *) cb,
2687 (int)sizeof(struct command_block));
2688
2689 IPW_DEBUG_FW("<< :\n");
2690 return 0;
2691
2692 }
2693
2694 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2695 {
2696 u32 control = 0;
2697 u32 index = 0;
2698
2699 IPW_DEBUG_FW(">> :\n");
2700
2701 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2702 ipw_fw_dma_write_command_block(priv, index,
2703 &priv->sram_desc.cb_list[index]);
2704
2705 /* Enable the DMA in the CSR register */
2706 ipw_clear_bit(priv, IPW_RESET_REG,
2707 IPW_RESET_REG_MASTER_DISABLED |
2708 IPW_RESET_REG_STOP_MASTER);
2709
2710 /* Set the Start bit. */
2711 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2712 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2713
2714 IPW_DEBUG_FW("<< :\n");
2715 return 0;
2716 }
2717
2718 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2719 {
2720 u32 address;
2721 u32 register_value = 0;
2722 u32 cb_fields_address = 0;
2723
2724 IPW_DEBUG_FW(">> :\n");
2725 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2726 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2727
2728 /* Read the DMA Control register */
2729 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2730 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2731
2732 /* Print the CB values */
2733 cb_fields_address = address;
2734 register_value = ipw_read_reg32(priv, cb_fields_address);
2735 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2736
2737 cb_fields_address += sizeof(u32);
2738 register_value = ipw_read_reg32(priv, cb_fields_address);
2739 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2740
2741 cb_fields_address += sizeof(u32);
2742 register_value = ipw_read_reg32(priv, cb_fields_address);
2743 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2744 register_value);
2745
2746 cb_fields_address += sizeof(u32);
2747 register_value = ipw_read_reg32(priv, cb_fields_address);
2748 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2749
2750 IPW_DEBUG_FW("<< :\n");
2751 }
2752
2753 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2754 {
2755 u32 current_cb_address = 0;
2756 u32 current_cb_index = 0;
2757
2758 IPW_DEBUG_FW(">> :\n");
2759 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2760
2761 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2762 sizeof(struct command_block);
2763
2764 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2765 current_cb_index, current_cb_address);
2766
2767 IPW_DEBUG_FW("<< :\n");
2768 return current_cb_index;
2769
2770 }
2771
2772 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2773 u32 src_address,
2774 u32 dest_address,
2775 u32 length,
2776 int interrupt_enabled, int is_last)
2777 {
2778
2779 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2780 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2781 CB_DEST_SIZE_LONG;
2782 struct command_block *cb;
2783 u32 last_cb_element = 0;
2784
2785 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2786 src_address, dest_address, length);
2787
2788 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2789 return -1;
2790
2791 last_cb_element = priv->sram_desc.last_cb_index;
2792 cb = &priv->sram_desc.cb_list[last_cb_element];
2793 priv->sram_desc.last_cb_index++;
2794
2795 /* Calculate the new CB control word */
2796 if (interrupt_enabled)
2797 control |= CB_INT_ENABLED;
2798
2799 if (is_last)
2800 control |= CB_LAST_VALID;
2801
2802 control |= length;
2803
2804 /* Calculate the CB Element's checksum value */
2805 cb->status = control ^ src_address ^ dest_address;
2806
2807 /* Copy the Source and Destination addresses */
2808 cb->dest_addr = dest_address;
2809 cb->source_addr = src_address;
2810
2811 /* Copy the Control Word last */
2812 cb->control = control;
2813
2814 return 0;
2815 }
2816
2817 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2818 u32 src_phys, u32 dest_address, u32 length)
2819 {
2820 u32 bytes_left = length;
2821 u32 src_offset = 0;
2822 u32 dest_offset = 0;
2823 int status = 0;
2824 IPW_DEBUG_FW(">> \n");
2825 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2826 src_phys, dest_address, length);
2827 while (bytes_left > CB_MAX_LENGTH) {
2828 status = ipw_fw_dma_add_command_block(priv,
2829 src_phys + src_offset,
2830 dest_address +
2831 dest_offset,
2832 CB_MAX_LENGTH, 0, 0);
2833 if (status) {
2834 IPW_DEBUG_FW_INFO(": Failed\n");
2835 return -1;
2836 } else
2837 IPW_DEBUG_FW_INFO(": Added new cb\n");
2838
2839 src_offset += CB_MAX_LENGTH;
2840 dest_offset += CB_MAX_LENGTH;
2841 bytes_left -= CB_MAX_LENGTH;
2842 }
2843
2844 /* add the buffer tail */
2845 if (bytes_left > 0) {
2846 status =
2847 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2848 dest_address + dest_offset,
2849 bytes_left, 0, 0);
2850 if (status) {
2851 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2852 return -1;
2853 } else
2854 IPW_DEBUG_FW_INFO
2855 (": Adding new cb - the buffer tail\n");
2856 }
2857
2858 IPW_DEBUG_FW("<< \n");
2859 return 0;
2860 }
2861
2862 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2863 {
2864 u32 current_index = 0, previous_index;
2865 u32 watchdog = 0;
2866
2867 IPW_DEBUG_FW(">> : \n");
2868
2869 current_index = ipw_fw_dma_command_block_index(priv);
2870 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2871 (int)priv->sram_desc.last_cb_index);
2872
2873 while (current_index < priv->sram_desc.last_cb_index) {
2874 udelay(50);
2875 previous_index = current_index;
2876 current_index = ipw_fw_dma_command_block_index(priv);
2877
2878 if (previous_index < current_index) {
2879 watchdog = 0;
2880 continue;
2881 }
2882 if (++watchdog > 400) {
2883 IPW_DEBUG_FW_INFO("Timeout\n");
2884 ipw_fw_dma_dump_command_block(priv);
2885 ipw_fw_dma_abort(priv);
2886 return -1;
2887 }
2888 }
2889
2890 ipw_fw_dma_abort(priv);
2891
2892 /*Disable the DMA in the CSR register */
2893 ipw_set_bit(priv, IPW_RESET_REG,
2894 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2895
2896 IPW_DEBUG_FW("<< dmaWaitSync \n");
2897 return 0;
2898 }
2899
2900 static void ipw_remove_current_network(struct ipw_priv *priv)
2901 {
2902 struct list_head *element, *safe;
2903 struct ieee80211_network *network = NULL;
2904 unsigned long flags;
2905
2906 spin_lock_irqsave(&priv->ieee->lock, flags);
2907 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2908 network = list_entry(element, struct ieee80211_network, list);
2909 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2910 list_del(element);
2911 list_add_tail(&network->list,
2912 &priv->ieee->network_free_list);
2913 }
2914 }
2915 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2916 }
2917
2918 /**
2919 * Check that the card is still alive.
2920 * Reads the debug register from domain0.
2921 * If the card is present, a pre-defined value should
2922 * be found there.
2923 *
2924 * @param priv
2925 * @return 1 if card is present, 0 otherwise
2926 */
2927 static inline int ipw_alive(struct ipw_priv *priv)
2928 {
2929 return ipw_read32(priv, 0x90) == 0xd55555d5;
2930 }
2931
2932 /* timeout in msec, attempted in 10-msec quanta */
2933 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2934 int timeout)
2935 {
2936 int i = 0;
2937
2938 do {
2939 if ((ipw_read32(priv, addr) & mask) == mask)
2940 return i;
2941 mdelay(10);
2942 i += 10;
2943 } while (i < timeout);
2944
2945 return -ETIME;
2946 }
2947
2948 /* These functions load the firmware and microcode for the operation of
2949 * the ipw hardware. They assume the buffer has all the bits for the
2950 * image and that the caller is handling the memory allocation and cleanup.
2951 */
2952
2953 static int ipw_stop_master(struct ipw_priv *priv)
2954 {
2955 int rc;
2956
2957 IPW_DEBUG_TRACE(">> \n");
2958 /* stop master. typical delay - 0 */
2959 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2960
2961 /* timeout is in msec, polled in 10-msec quanta */
2962 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2963 IPW_RESET_REG_MASTER_DISABLED, 100);
2964 if (rc < 0) {
2965 IPW_ERROR("wait for stop master failed after 100ms\n");
2966 return -1;
2967 }
2968
2969 IPW_DEBUG_INFO("stop master %dms\n", rc);
2970
2971 return rc;
2972 }
2973
2974 static void ipw_arc_release(struct ipw_priv *priv)
2975 {
2976 IPW_DEBUG_TRACE(">> \n");
2977 mdelay(5);
2978
2979 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2980
2981 /* no one knows timing, for safety add some delay */
2982 mdelay(5);
2983 }
2984
2985 struct fw_chunk {
2986 u32 address;
2987 u32 length;
2988 };
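
/*
 * Layout parsed by ipw_load_firmware() below: the image is a sequence of
 * { struct fw_chunk, payload } records, i.e. each header gives a device
 * address and a length, and 'length' bytes of payload follow it immediately:
 *
 *	[fw_chunk][length bytes][fw_chunk][length bytes]...
 */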
2989
2990 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2991 {
2992 int rc = 0, i, addr;
2993 u8 cr = 0;
2994 u16 *image;
2995
2996 image = (u16 *) data;
2997
2998 IPW_DEBUG_TRACE(">> \n");
2999
3000 rc = ipw_stop_master(priv);
3001
3002 if (rc < 0)
3003 return rc;
3004
3005 for (addr = IPW_SHARED_LOWER_BOUND;
3006 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3007 ipw_write32(priv, addr, 0);
3008 }
3009
3010 /* no ucode (yet) */
3011 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3012 /* destroy DMA queues */
3013 /* reset sequence */
3014
3015 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3016 ipw_arc_release(priv);
3017 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3018 mdelay(1);
3019
3020 /* reset PHY */
3021 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3022 mdelay(1);
3023
3024 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3025 mdelay(1);
3026
3027 /* enable ucode store */
3028 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3029 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3030 mdelay(1);
3031
3032 /* write ucode */
3033 /**
3034 * @bug
3035 * Do NOT set indirect address register once and then
3036 * store data to indirect data register in the loop.
3037 * It seems very reasonable, but in this case DINO does not
3038 * accept the ucode. It is essential to set the address each time.
3039 */
3040 /* load new ipw uCode */
3041 for (i = 0; i < len / 2; i++)
3042 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3043 cpu_to_le16(image[i]));
3044
3045 /* enable DINO */
3046 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3047 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3048
3049 /* this is where the igx / win driver deviates from the VAP driver. */
3050
3051 /* wait for alive response */
3052 for (i = 0; i < 100; i++) {
3053 /* poll for incoming data */
3054 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3055 if (cr & DINO_RXFIFO_DATA)
3056 break;
3057 mdelay(1);
3058 }
3059
3060 if (cr & DINO_RXFIFO_DATA) {
3061 /* alive_command_response size is NOT a multiple of 4 */
3062 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3063
3064 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3065 response_buffer[i] =
3066 le32_to_cpu(ipw_read_reg32(priv,
3067 IPW_BASEBAND_RX_FIFO_READ));
3068 memcpy(&priv->dino_alive, response_buffer,
3069 sizeof(priv->dino_alive));
3070 if (priv->dino_alive.alive_command == 1
3071 && priv->dino_alive.ucode_valid == 1) {
3072 rc = 0;
3073 IPW_DEBUG_INFO
3074 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3075 "of %02d/%02d/%02d %02d:%02d\n",
3076 priv->dino_alive.software_revision,
3077 priv->dino_alive.software_revision,
3078 priv->dino_alive.device_identifier,
3079 priv->dino_alive.device_identifier,
3080 priv->dino_alive.time_stamp[0],
3081 priv->dino_alive.time_stamp[1],
3082 priv->dino_alive.time_stamp[2],
3083 priv->dino_alive.time_stamp[3],
3084 priv->dino_alive.time_stamp[4]);
3085 } else {
3086 IPW_DEBUG_INFO("Microcode is not alive\n");
3087 rc = -EINVAL;
3088 }
3089 } else {
3090 IPW_DEBUG_INFO("No alive response from DINO\n");
3091 rc = -ETIME;
3092 }
3093
3094 /* disable DINO, otherwise for some reason
3095 the firmware has problems getting the alive resp. */
3096 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3097
3098 return rc;
3099 }
3100
3101 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3102 {
3103 int rc = -1;
3104 int offset = 0;
3105 struct fw_chunk *chunk;
3106 dma_addr_t shared_phys;
3107 u8 *shared_virt;
3108
3109 IPW_DEBUG_TRACE(">> : \n");
3110 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3111
3112 if (!shared_virt)
3113 return -ENOMEM;
3114
3115 memmove(shared_virt, data, len);
3116
3117 /* Start the Dma */
3118 rc = ipw_fw_dma_enable(priv);
3119
3120 if (priv->sram_desc.last_cb_index > 0) {
3121 /* the DMA is already ready; this would be a bug. */
3122 BUG();
3123 goto out;
3124 }
3125
3126 do {
3127 chunk = (struct fw_chunk *)(data + offset);
3128 offset += sizeof(struct fw_chunk);
3129 /* build DMA packet and queue up for sending */
3130 /* dma to chunk->address, the chunk->length bytes from data +
3131 * offset */
3132 /* Dma loading */
3133 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3134 le32_to_cpu(chunk->address),
3135 le32_to_cpu(chunk->length));
3136 if (rc) {
3137 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3138 goto out;
3139 }
3140
3141 offset += le32_to_cpu(chunk->length);
3142 } while (offset < len);
3143
3144 /* Run the DMA and wait for the answer */
3145 rc = ipw_fw_dma_kick(priv);
3146 if (rc) {
3147 IPW_ERROR("dmaKick Failed\n");
3148 goto out;
3149 }
3150
3151 rc = ipw_fw_dma_wait(priv);
3152 if (rc) {
3153 IPW_ERROR("dmaWaitSync Failed\n");
3154 goto out;
3155 }
3156 out:
3157 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3158 return rc;
3159 }
3160
3161 /* stop nic */
3162 static int ipw_stop_nic(struct ipw_priv *priv)
3163 {
3164 int rc = 0;
3165
3166 /* stop */
3167 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3168
3169 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3170 IPW_RESET_REG_MASTER_DISABLED, 500);
3171 if (rc < 0) {
3172 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3173 return rc;
3174 }
3175
3176 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3177
3178 return rc;
3179 }
3180
3181 static void ipw_start_nic(struct ipw_priv *priv)
3182 {
3183 IPW_DEBUG_TRACE(">>\n");
3184
3185 /* prvHwStartNic release ARC */
3186 ipw_clear_bit(priv, IPW_RESET_REG,
3187 IPW_RESET_REG_MASTER_DISABLED |
3188 IPW_RESET_REG_STOP_MASTER |
3189 CBD_RESET_REG_PRINCETON_RESET);
3190
3191 /* enable power management */
3192 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3193 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3194
3195 IPW_DEBUG_TRACE("<<\n");
3196 }
3197
3198 static int ipw_init_nic(struct ipw_priv *priv)
3199 {
3200 int rc;
3201
3202 IPW_DEBUG_TRACE(">>\n");
3203 /* reset */
3204 /*prvHwInitNic */
3205 /* set "initialization complete" bit to move adapter to D0 state */
3206 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3207
3208 /* low-level PLL activation */
3209 ipw_write32(priv, IPW_READ_INT_REGISTER,
3210 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3211
3212 /* wait for clock stabilization */
3213 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3214 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3215 if (rc < 0)
3216 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3217
3218 /* assert SW reset */
3219 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3220
3221 udelay(10);
3222
3223 /* set "initialization complete" bit to move adapter to D0 state */
3224 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3225
3226 IPW_DEBUG_TRACE("<<\n");
3227 return 0;
3228 }
3229
3230 /* Call this function from process context; it will sleep in request_firmware.
3231 * Probe is an ok place to call this from.
3232 */
3233 static int ipw_reset_nic(struct ipw_priv *priv)
3234 {
3235 int rc = 0;
3236 unsigned long flags;
3237
3238 IPW_DEBUG_TRACE(">>\n");
3239
3240 rc = ipw_init_nic(priv);
3241
3242 spin_lock_irqsave(&priv->lock, flags);
3243 /* Clear the 'host command active' bit... */
3244 priv->status &= ~STATUS_HCMD_ACTIVE;
3245 wake_up_interruptible(&priv->wait_command_queue);
3246 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3247 wake_up_interruptible(&priv->wait_state);
3248 spin_unlock_irqrestore(&priv->lock, flags);
3249
3250 IPW_DEBUG_TRACE("<<\n");
3251 return rc;
3252 }
3253
3254
3255 struct ipw_fw {
3256 __le32 ver;
3257 __le32 boot_size;
3258 __le32 ucode_size;
3259 __le32 fw_size;
3260 u8 data[0];
3261 };
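
/*
 * On-disk layout implied by ipw_get_fw() and ipw_load() below: the header
 * above is followed by three concatenated images, sliced out by size:
 *
 *	data[0 .. boot_size)                        boot image
 *	data[boot_size .. boot_size + ucode_size)   ucode image
 *	data[... .. ... + fw_size)                  runtime firmware image
 */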
3262
3263 static int ipw_get_fw(struct ipw_priv *priv,
3264 const struct firmware **raw, const char *name)
3265 {
3266 struct ipw_fw *fw;
3267 int rc;
3268
3269 /* ask firmware_class module to get the boot firmware off disk */
3270 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3271 if (rc < 0) {
3272 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3273 return rc;
3274 }
3275
3276 if ((*raw)->size < sizeof(*fw)) {
3277 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3278 return -EINVAL;
3279 }
3280
3281 fw = (void *)(*raw)->data;
3282
3283 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3284 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3285 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3286 name, (*raw)->size);
3287 return -EINVAL;
3288 }
3289
3290 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3291 name,
3292 le32_to_cpu(fw->ver) >> 16,
3293 le32_to_cpu(fw->ver) & 0xff,
3294 (*raw)->size - sizeof(*fw));
3295 return 0;
3296 }
3297
3298 #define IPW_RX_BUF_SIZE (3000)
3299
3300 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3301 struct ipw_rx_queue *rxq)
3302 {
3303 unsigned long flags;
3304 int i;
3305
3306 spin_lock_irqsave(&rxq->lock, flags);
3307
3308 INIT_LIST_HEAD(&rxq->rx_free);
3309 INIT_LIST_HEAD(&rxq->rx_used);
3310
3311 /* Fill the rx_used queue with _all_ of the Rx buffers */
3312 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3313 /* In the reset function, these buffers may have been allocated
3314 * to an SKB, so we need to unmap and free potential storage */
3315 if (rxq->pool[i].skb != NULL) {
3316 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3317 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3318 dev_kfree_skb(rxq->pool[i].skb);
3319 rxq->pool[i].skb = NULL;
3320 }
3321 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3322 }
3323
3324 /* Set things up so that we have processed and used all buffers, but have
3325 * not restocked the Rx queue with fresh buffers */
3326 rxq->read = rxq->write = 0;
3327 rxq->processed = RX_QUEUE_SIZE - 1;
3328 rxq->free_count = 0;
3329 spin_unlock_irqrestore(&rxq->lock, flags);
3330 }
3331
3332 #ifdef CONFIG_PM
3333 static int fw_loaded = 0;
3334 static const struct firmware *raw = NULL;
3335
3336 static void free_firmware(void)
3337 {
3338 if (fw_loaded) {
3339 release_firmware(raw);
3340 raw = NULL;
3341 fw_loaded = 0;
3342 }
3343 }
3344 #else
3345 #define free_firmware() do {} while (0)
3346 #endif
3347
3348 static int ipw_load(struct ipw_priv *priv)
3349 {
3350 #ifndef CONFIG_PM
3351 const struct firmware *raw = NULL;
3352 #endif
3353 struct ipw_fw *fw;
3354 u8 *boot_img, *ucode_img, *fw_img;
3355 u8 *name = NULL;
3356 int rc = 0, retries = 3;
3357
3358 switch (priv->ieee->iw_mode) {
3359 case IW_MODE_ADHOC:
3360 name = "ipw2200-ibss.fw";
3361 break;
3362 #ifdef CONFIG_IPW2200_MONITOR
3363 case IW_MODE_MONITOR:
3364 name = "ipw2200-sniffer.fw";
3365 break;
3366 #endif
3367 case IW_MODE_INFRA:
3368 name = "ipw2200-bss.fw";
3369 break;
3370 }
3371
3372 if (!name) {
3373 rc = -EINVAL;
3374 goto error;
3375 }
3376
3377 #ifdef CONFIG_PM
3378 if (!fw_loaded) {
3379 #endif
3380 rc = ipw_get_fw(priv, &raw, name);
3381 if (rc < 0)
3382 goto error;
3383 #ifdef CONFIG_PM
3384 }
3385 #endif
3386
3387 fw = (void *)raw->data;
3388 boot_img = &fw->data[0];
3389 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3390 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3391 le32_to_cpu(fw->ucode_size)];
3392
3393 if (rc < 0)
3394 goto error;
3395
3396 if (!priv->rxq)
3397 priv->rxq = ipw_rx_queue_alloc(priv);
3398 else
3399 ipw_rx_queue_reset(priv, priv->rxq);
3400 if (!priv->rxq) {
3401 IPW_ERROR("Unable to initialize Rx queue\n");
3402 goto error;
3403 }
3404
3405 retry:
3406 /* Ensure interrupts are disabled */
3407 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3408 priv->status &= ~STATUS_INT_ENABLED;
3409
3410 /* ack pending interrupts */
3411 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3412
3413 ipw_stop_nic(priv);
3414
3415 rc = ipw_reset_nic(priv);
3416 if (rc < 0) {
3417 IPW_ERROR("Unable to reset NIC\n");
3418 goto error;
3419 }
3420
3421 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3422 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3423
3424 /* DMA the initial boot firmware into the device */
3425 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3426 if (rc < 0) {
3427 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3428 goto error;
3429 }
3430
3431 /* kick start the device */
3432 ipw_start_nic(priv);
3433
3434 /* wait for the device to finish its initial startup sequence */
3435 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3436 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3437 if (rc < 0) {
3438 IPW_ERROR("device failed to boot initial fw image\n");
3439 goto error;
3440 }
3441 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3442
3443 /* ack fw init done interrupt */
3444 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3445
3446 /* DMA the ucode into the device */
3447 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3448 if (rc < 0) {
3449 IPW_ERROR("Unable to load ucode: %d\n", rc);
3450 goto error;
3451 }
3452
3453 /* stop nic */
3454 ipw_stop_nic(priv);
3455
3456 /* DMA bss firmware into the device */
3457 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3458 if (rc < 0) {
3459 IPW_ERROR("Unable to load firmware: %d\n", rc);
3460 goto error;
3461 }
3462 #ifdef CONFIG_PM
3463 fw_loaded = 1;
3464 #endif
3465
3466 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3467
3468 rc = ipw_queue_reset(priv);
3469 if (rc < 0) {
3470 IPW_ERROR("Unable to initialize queues\n");
3471 goto error;
3472 }
3473
3474 /* Ensure interrupts are disabled */
3475 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3476 /* ack pending interrupts */
3477 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3478
3479 /* kick start the device */
3480 ipw_start_nic(priv);
3481
3482 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3483 if (retries > 0) {
3484 IPW_WARNING("Parity error. Retrying init.\n");
3485 retries--;
3486 goto retry;
3487 }
3488
3489 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3490 rc = -EIO;
3491 goto error;
3492 }
3493
3494 /* wait for the device */
3495 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3496 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3497 if (rc < 0) {
3498 IPW_ERROR("device failed to start within 500ms\n");
3499 goto error;
3500 }
3501 IPW_DEBUG_INFO("device response after %dms\n", rc);
3502
3503 /* ack fw init done interrupt */
3504 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3505
3506 /* read eeprom data and initialize the eeprom region of sram */
3507 priv->eeprom_delay = 1;
3508 ipw_eeprom_init_sram(priv);
3509
3510 /* enable interrupts */
3511 ipw_enable_interrupts(priv);
3512
3513 /* Ensure our queue has valid packets */
3514 ipw_rx_queue_replenish(priv);
3515
3516 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3517
3518 /* ack pending interrupts */
3519 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3520
3521 #ifndef CONFIG_PM
3522 release_firmware(raw);
3523 #endif
3524 return 0;
3525
3526 error:
3527 if (priv->rxq) {
3528 ipw_rx_queue_free(priv, priv->rxq);
3529 priv->rxq = NULL;
3530 }
3531 ipw_tx_queue_free(priv);
3532 if (raw)
3533 release_firmware(raw);
3534 #ifdef CONFIG_PM
3535 fw_loaded = 0;
3536 raw = NULL;
3537 #endif
3538
3539 return rc;
3540 }
3541
3542 /**
3543 * DMA services
3544 *
3545 * Theory of operation
3546 *
3547 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3548 * Two empty entries are always kept in the buffer to protect from overflow.
3549 *
3550 * For the Tx queues there are low mark and high mark limits. If, after
3551 * queuing a packet for Tx, the free space drops below the low mark, the Tx
3552 * queue is stopped. When packets are reclaimed (on the 'Tx done' IRQ) and
3553 * the free space rises above the high mark, the Tx queue is resumed.
3554 *
3555 * The IPW operates with six queues, one receive queue in the device's
3556 * sram, one transmit queue for sending commands to the device firmware,
3557 * and four transmit queues for data.
3558 *
3559 * The four transmit queues allow for performing quality of service (qos)
3560 * transmissions as per the 802.11 protocol. Currently Linux does not
3561 * provide a mechanism to the user for utilizing prioritized queues, so
3562 * we only utilize the first data transmit queue (queue1).
3563 */
3564
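/*
 * A rough sketch of the low/high mark throttling described above (a sketch
 * only, with txq standing for one of priv->txq[]; the exact checks live in
 * the Tx and Tx-reclaim paths elsewhere in this file and may use the marks
 * slightly differently):
 *
 *	if (ipw_queue_space(&txq->q) < txq->q.low_mark)
 *		netif_stop_queue(priv->net_dev);
 *	...
 *	if (ipw_queue_space(&txq->q) > txq->q.high_mark)
 *		netif_wake_queue(priv->net_dev);
 */
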
3565 /**
3566 * Driver allocates buffers of this size for Rx
3567 */
3568
3569 static inline int ipw_queue_space(const struct clx2_queue *q)
3570 {
3571 int s = q->last_used - q->first_empty;
3572 if (s <= 0)
3573 s += q->n_bd;
3574 s -= 2; /* keep some reserve to not confuse empty and full situations */
3575 if (s < 0)
3576 s = 0;
3577 return s;
3578 }
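
/*
 * Worked example: with n_bd = 64, first_empty = 10 and last_used = 5,
 * s = 5 - 10 = -5, wrapped to 59, minus the 2-entry reserve gives 57
 * usable slots.
 */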
3579
3580 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3581 {
3582 return (++index == n_bd) ? 0 : index;
3583 }
3584
3585 /**
3586 * Initialize common DMA queue structure
3587 *
3588 * @param q queue to init
3589 * @param count Number of BD's to allocate. Should be power of 2
3590 * @param read_register Address for 'read' register
3591 * (not offset within BAR, full address)
3592 * @param write_register Address for 'write' register
3593 * (not offset within BAR, full address)
3594 * @param base_register Address for 'base' register
3595 * (not offset within BAR, full address)
3596 * @param size Address for 'size' register
3597 * (not offset within BAR, full address)
3598 */
3599 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3600 int count, u32 read, u32 write, u32 base, u32 size)
3601 {
3602 q->n_bd = count;
3603
3604 q->low_mark = q->n_bd / 4;
3605 if (q->low_mark < 4)
3606 q->low_mark = 4;
3607
3608 q->high_mark = q->n_bd / 8;
3609 if (q->high_mark < 2)
3610 q->high_mark = 2;
3611
3612 q->first_empty = q->last_used = 0;
3613 q->reg_r = read;
3614 q->reg_w = write;
3615
3616 ipw_write32(priv, base, q->dma_addr);
3617 ipw_write32(priv, size, count);
3618 ipw_write32(priv, read, 0);
3619 ipw_write32(priv, write, 0);
3620
3621 _ipw_read32(priv, 0x90);
3622 }
3623
3624 static int ipw_queue_tx_init(struct ipw_priv *priv,
3625 struct clx2_tx_queue *q,
3626 int count, u32 read, u32 write, u32 base, u32 size)
3627 {
3628 struct pci_dev *dev = priv->pci_dev;
3629
3630 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3631 if (!q->txb) {
3632 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3633 return -ENOMEM;
3634 }
3635
3636 q->bd =
3637 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3638 if (!q->bd) {
3639 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3640 sizeof(q->bd[0]) * count);
3641 kfree(q->txb);
3642 q->txb = NULL;
3643 return -ENOMEM;
3644 }
3645
3646 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3647 return 0;
3648 }
3649
3650 /**
3651 * Free one TFD, the one at index [txq->q.last_used].
3652 * Do NOT advance any indexes
3653 *
3654 * @param dev
3655 * @param txq
3656 */
3657 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3658 struct clx2_tx_queue *txq)
3659 {
3660 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3661 struct pci_dev *dev = priv->pci_dev;
3662 int i;
3663
3664 /* classify bd */
3665 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3666 /* nothing to cleanup after for host commands */
3667 return;
3668
3669 /* sanity check */
3670 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3671 IPW_ERROR("Too many chunks: %i\n",
3672 le32_to_cpu(bd->u.data.num_chunks));
3673 /** @todo issue fatal error, it is quite serious situation */
3674 return;
3675 }
3676
3677 /* unmap chunks if any */
3678 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3679 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3680 le16_to_cpu(bd->u.data.chunk_len[i]),
3681 PCI_DMA_TODEVICE);
3682 if (txq->txb[txq->q.last_used]) {
3683 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3684 txq->txb[txq->q.last_used] = NULL;
3685 }
3686 }
3687 }
3688
3689 /**
3690 * Deallocate DMA queue.
3691 *
3692 * Empty queue by removing and destroying all BD's.
3693 * Free all buffers.
3694 *
3695 * @param dev
3696 * @param q
3697 */
3698 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3699 {
3700 struct clx2_queue *q = &txq->q;
3701 struct pci_dev *dev = priv->pci_dev;
3702
3703 if (q->n_bd == 0)
3704 return;
3705
3706 /* first, empty all BD's */
3707 for (; q->first_empty != q->last_used;
3708 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3709 ipw_queue_tx_free_tfd(priv, txq);
3710 }
3711
3712 /* free buffers belonging to queue itself */
3713 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3714 q->dma_addr);
3715 kfree(txq->txb);
3716
3717 /* 0 fill whole structure */
3718 memset(txq, 0, sizeof(*txq));
3719 }
3720
3721 /**
3722 * Destroy all DMA queues and structures
3723 *
3724 * @param priv
3725 */
3726 static void ipw_tx_queue_free(struct ipw_priv *priv)
3727 {
3728 /* Tx CMD queue */
3729 ipw_queue_tx_free(priv, &priv->txq_cmd);
3730
3731 /* Tx queues */
3732 ipw_queue_tx_free(priv, &priv->txq[0]);
3733 ipw_queue_tx_free(priv, &priv->txq[1]);
3734 ipw_queue_tx_free(priv, &priv->txq[2]);
3735 ipw_queue_tx_free(priv, &priv->txq[3]);
3736 }
3737
3738 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3739 {
3740 /* First 3 bytes are manufacturer */
3741 bssid[0] = priv->mac_addr[0];
3742 bssid[1] = priv->mac_addr[1];
3743 bssid[2] = priv->mac_addr[2];
3744
3745 /* Last bytes are random */
3746 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3747
3748 bssid[0] &= 0xfe; /* clear multicast bit */
3749 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3750 }
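
/*
 * Example: for an adapter MAC beginning 00:13:ce, the generated IBSS BSSID
 * is 02:13:ce:rr:rr:rr, where rr:rr:rr is random and the first octet has
 * the locally administered bit set and the multicast bit cleared.
 */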
3751
3752 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3753 {
3754 struct ipw_station_entry entry;
3755 int i;
3756
3757 for (i = 0; i < priv->num_stations; i++) {
3758 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3759 /* Another node is active in network */
3760 priv->missed_adhoc_beacons = 0;
3761 if (!(priv->config & CFG_STATIC_CHANNEL))
3762 /* when other nodes drop out, we drop out */
3763 priv->config &= ~CFG_ADHOC_PERSIST;
3764
3765 return i;
3766 }
3767 }
3768
3769 if (i == MAX_STATIONS)
3770 return IPW_INVALID_STATION;
3771
3772 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3773
3774 entry.reserved = 0;
3775 entry.support_mode = 0;
3776 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3777 memcpy(priv->stations[i], bssid, ETH_ALEN);
3778 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3779 &entry, sizeof(entry));
3780 priv->num_stations++;
3781
3782 return i;
3783 }
3784
3785 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3786 {
3787 int i;
3788
3789 for (i = 0; i < priv->num_stations; i++)
3790 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3791 return i;
3792
3793 return IPW_INVALID_STATION;
3794 }
3795
3796 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3797 {
3798 int err;
3799
3800 if (priv->status & STATUS_ASSOCIATING) {
3801 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3802 queue_work(priv->workqueue, &priv->disassociate);
3803 return;
3804 }
3805
3806 if (!(priv->status & STATUS_ASSOCIATED)) {
3807 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3808 return;
3809 }
3810
3811 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3812 "on channel %d.\n",
3813 MAC_ARG(priv->assoc_request.bssid),
3814 priv->assoc_request.channel);
3815
3816 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3817 priv->status |= STATUS_DISASSOCIATING;
3818
3819 if (quiet)
3820 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3821 else
3822 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3823
3824 err = ipw_send_associate(priv, &priv->assoc_request);
3825 if (err) {
3826 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3827 "failed.\n");
3828 return;
3829 }
3830
3831 }
3832
3833 static int ipw_disassociate(void *data)
3834 {
3835 struct ipw_priv *priv = data;
3836 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3837 return 0;
3838 ipw_send_disassociate(data, 0);
3839 return 1;
3840 }
3841
3842 static void ipw_bg_disassociate(void *data)
3843 {
3844 struct ipw_priv *priv = data;
3845 mutex_lock(&priv->mutex);
3846 ipw_disassociate(data);
3847 mutex_unlock(&priv->mutex);
3848 }
3849
3850 static void ipw_system_config(void *data)
3851 {
3852 struct ipw_priv *priv = data;
3853
3854 #ifdef CONFIG_IPW2200_PROMISCUOUS
3855 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3856 priv->sys_config.accept_all_data_frames = 1;
3857 priv->sys_config.accept_non_directed_frames = 1;
3858 priv->sys_config.accept_all_mgmt_bcpr = 1;
3859 priv->sys_config.accept_all_mgmt_frames = 1;
3860 }
3861 #endif
3862
3863 ipw_send_system_config(priv);
3864 }
3865
3866 struct ipw_status_code {
3867 u16 status;
3868 const char *reason;
3869 };
3870
3871 static const struct ipw_status_code ipw_status_codes[] = {
3872 {0x00, "Successful"},
3873 {0x01, "Unspecified failure"},
3874 {0x0A, "Cannot support all requested capabilities in the "
3875 "Capability information field"},
3876 {0x0B, "Reassociation denied due to inability to confirm that "
3877 "association exists"},
3878 {0x0C, "Association denied due to reason outside the scope of this "
3879 "standard"},
3880 {0x0D,
3881 "Responding station does not support the specified authentication "
3882 "algorithm"},
3883 {0x0E,
3884 "Received an Authentication frame with authentication sequence "
3885 "transaction sequence number out of expected sequence"},
3886 {0x0F, "Authentication rejected because of challenge failure"},
3887 {0x10, "Authentication rejected due to timeout waiting for next "
3888 "frame in sequence"},
3889 {0x11, "Association denied because AP is unable to handle additional "
3890 "associated stations"},
3891 {0x12,
3892 "Association denied due to requesting station not supporting all "
3893 "of the datarates in the BSSBasicServiceSet Parameter"},
3894 {0x13,
3895 "Association denied due to requesting station not supporting "
3896 "short preamble operation"},
3897 {0x14,
3898 "Association denied due to requesting station not supporting "
3899 "PBCC encoding"},
3900 {0x15,
3901 "Association denied due to requesting station not supporting "
3902 "channel agility"},
3903 {0x19,
3904 "Association denied due to requesting station not supporting "
3905 "short slot operation"},
3906 {0x1A,
3907 "Association denied due to requesting station not supporting "
3908 "DSSS-OFDM operation"},
3909 {0x28, "Invalid Information Element"},
3910 {0x29, "Group Cipher is not valid"},
3911 {0x2A, "Pairwise Cipher is not valid"},
3912 {0x2B, "AKMP is not valid"},
3913 {0x2C, "Unsupported RSN IE version"},
3914 {0x2D, "Invalid RSN IE Capabilities"},
3915 {0x2E, "Cipher suite is rejected per security policy"},
3916 };
3917
3918 #ifdef CONFIG_IPW2200_DEBUG
3919 static const char *ipw_get_status_code(u16 status)
3920 {
3921 int i;
3922 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3923 if (ipw_status_codes[i].status == (status & 0xff))
3924 return ipw_status_codes[i].reason;
3925 return "Unknown status value.";
3926 }
3927 #endif
3928
3929 static inline void average_init(struct average *avg)
3930 {
3931 memset(avg, 0, sizeof(*avg));
3932 }
3933
3934 #define DEPTH_RSSI 8
3935 #define DEPTH_NOISE 16
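/* exponential_average() is a simple IIR low-pass filter: the new sample
 * contributes 1/depth of the result and the previous average the remaining
 * (depth - 1)/depth.  With DEPTH_RSSI = 8 each new RSSI reading therefore
 * carries a weight of 1/8; noise uses the slower DEPTH_NOISE = 16. */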
3936 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3937 {
3938 return ((depth-1)*prev_avg + val)/depth;
3939 }
3940
3941 static void average_add(struct average *avg, s16 val)
3942 {
3943 avg->sum -= avg->entries[avg->pos];
3944 avg->sum += val;
3945 avg->entries[avg->pos++] = val;
3946 if (unlikely(avg->pos == AVG_ENTRIES)) {
3947 avg->init = 1;
3948 avg->pos = 0;
3949 }
3950 }
3951
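/* Return the running average: over the samples collected so far while the
 * window in struct average is still filling, and over all AVG_ENTRIES once
 * average_add() has wrapped the ring (avg->init set). */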
3952 static s16 average_value(struct average *avg)
3953 {
3954 if (!unlikely(avg->init)) {
3955 if (avg->pos)
3956 return avg->sum / avg->pos;
3957 return 0;
3958 }
3959
3960 return avg->sum / AVG_ENTRIES;
3961 }
3962
3963 static void ipw_reset_stats(struct ipw_priv *priv)
3964 {
3965 u32 len = sizeof(u32);
3966
3967 priv->quality = 0;
3968
3969 average_init(&priv->average_missed_beacons);
3970 priv->exp_avg_rssi = -60;
3971 priv->exp_avg_noise = -85 + 0x100;
3972
3973 priv->last_rate = 0;
3974 priv->last_missed_beacons = 0;
3975 priv->last_rx_packets = 0;
3976 priv->last_tx_packets = 0;
3977 priv->last_tx_failures = 0;
3978
3979 /* Firmware managed, reset only when NIC is restarted, so we have to
3980 * normalize on the current value */
3981 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3982 &priv->last_rx_err, &len);
3983 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3984 &priv->last_tx_failures, &len);
3985
3986 /* Driver managed, reset with each association */
3987 priv->missed_adhoc_beacons = 0;
3988 priv->missed_beacons = 0;
3989 priv->tx_packets = 0;
3990 priv->rx_packets = 0;
3991
3992 }
3993
3994 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3995 {
3996 u32 i = 0x80000000;
3997 u32 mask = priv->rates_mask;
3998 /* If currently associated in B mode, restrict the maximum
3999 * rate match to B rates */
4000 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4001 mask &= IEEE80211_CCK_RATES_MASK;
4002
4003 /* TODO: Verify that the rate is supported by the current rates
4004 * list. */
4005
4006 while (i && !(mask & i))
4007 i >>= 1;
4008 switch (i) {
4009 case IEEE80211_CCK_RATE_1MB_MASK:
4010 return 1000000;
4011 case IEEE80211_CCK_RATE_2MB_MASK:
4012 return 2000000;
4013 case IEEE80211_CCK_RATE_5MB_MASK:
4014 return 5500000;
4015 case IEEE80211_OFDM_RATE_6MB_MASK:
4016 return 6000000;
4017 case IEEE80211_OFDM_RATE_9MB_MASK:
4018 return 9000000;
4019 case IEEE80211_CCK_RATE_11MB_MASK:
4020 return 11000000;
4021 case IEEE80211_OFDM_RATE_12MB_MASK:
4022 return 12000000;
4023 case IEEE80211_OFDM_RATE_18MB_MASK:
4024 return 18000000;
4025 case IEEE80211_OFDM_RATE_24MB_MASK:
4026 return 24000000;
4027 case IEEE80211_OFDM_RATE_36MB_MASK:
4028 return 36000000;
4029 case IEEE80211_OFDM_RATE_48MB_MASK:
4030 return 48000000;
4031 case IEEE80211_OFDM_RATE_54MB_MASK:
4032 return 54000000;
4033 }
4034
4035 if (priv->ieee->mode == IEEE_B)
4036 return 11000000;
4037 else
4038 return 54000000;
4039 }
4040
4041 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4042 {
4043 u32 rate, len = sizeof(rate);
4044 int err;
4045
4046 if (!(priv->status & STATUS_ASSOCIATED))
4047 return 0;
4048
4049 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4050 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4051 &len);
4052 if (err) {
4053 IPW_DEBUG_INFO("failed querying ordinals.\n");
4054 return 0;
4055 }
4056 } else
4057 return ipw_get_max_rate(priv);
4058
4059 switch (rate) {
4060 case IPW_TX_RATE_1MB:
4061 return 1000000;
4062 case IPW_TX_RATE_2MB:
4063 return 2000000;
4064 case IPW_TX_RATE_5MB:
4065 return 5500000;
4066 case IPW_TX_RATE_6MB:
4067 return 6000000;
4068 case IPW_TX_RATE_9MB:
4069 return 9000000;
4070 case IPW_TX_RATE_11MB:
4071 return 11000000;
4072 case IPW_TX_RATE_12MB:
4073 return 12000000;
4074 case IPW_TX_RATE_18MB:
4075 return 18000000;
4076 case IPW_TX_RATE_24MB:
4077 return 24000000;
4078 case IPW_TX_RATE_36MB:
4079 return 36000000;
4080 case IPW_TX_RATE_48MB:
4081 return 48000000;
4082 case IPW_TX_RATE_54MB:
4083 return 54000000;
4084 }
4085
4086 return 0;
4087 }
4088
4089 #define IPW_STATS_INTERVAL (2 * HZ)
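/* ipw_gather_stats() re-queues itself via queue_delayed_work() at the end of
 * the function, so the link statistics below are refreshed every two seconds
 * while we are associated. */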
4090 static void ipw_gather_stats(struct ipw_priv *priv)
4091 {
4092 u32 rx_err, rx_err_delta, rx_packets_delta;
4093 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4094 u32 missed_beacons_percent, missed_beacons_delta;
4095 u32 quality = 0;
4096 u32 len = sizeof(u32);
4097 s16 rssi;
4098 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4099 rate_quality;
4100 u32 max_rate;
4101
4102 if (!(priv->status & STATUS_ASSOCIATED)) {
4103 priv->quality = 0;
4104 return;
4105 }
4106
4107 /* Update the statistics */
4108 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4109 &priv->missed_beacons, &len);
4110 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4111 priv->last_missed_beacons = priv->missed_beacons;
4112 if (priv->assoc_request.beacon_interval) {
4113 missed_beacons_percent = missed_beacons_delta *
4114 (HZ * priv->assoc_request.beacon_interval) /
4115 (IPW_STATS_INTERVAL * 10);
4116 } else {
4117 missed_beacons_percent = 0;
4118 }
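	/* Worked example (illustrative): with the common 100 TU beacon
	 * interval a two second window expects roughly 20 beacons, so the
	 * expression above reduces to missed_beacons_delta * 5, i.e. each
	 * missed beacon costs about 5%. */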
4119 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4120
4121 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4122 rx_err_delta = rx_err - priv->last_rx_err;
4123 priv->last_rx_err = rx_err;
4124
4125 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4126 tx_failures_delta = tx_failures - priv->last_tx_failures;
4127 priv->last_tx_failures = tx_failures;
4128
4129 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4130 priv->last_rx_packets = priv->rx_packets;
4131
4132 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4133 priv->last_tx_packets = priv->tx_packets;
4134
4135 /* Calculate quality based on the following:
4136 *
4137 * Missed beacon: 100% = 0, 0% = 70% missed
4138 * Rate: 60% = 1Mbs, 100% = Max
4139 * Rx and Tx errors represent a straight % of total Rx/Tx
4140 * RSSI: 100% = > -50, 0% = < -80
4141 * Rx errors: 100% = 0, 0% = 50% missed
4142 *
4143 * The lowest computed quality is used.
4144 *
4145 */
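	/* Worked example (illustrative): if 20% of beacons were missed in the
	 * last window, beacon_quality starts at 80 and is rescaled below to
	 * (80 - 5) * 100 / 95 = 78%. */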
4146 #define BEACON_THRESHOLD 5
4147 beacon_quality = 100 - missed_beacons_percent;
4148 if (beacon_quality < BEACON_THRESHOLD)
4149 beacon_quality = 0;
4150 else
4151 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4152 (100 - BEACON_THRESHOLD);
4153 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4154 beacon_quality, missed_beacons_percent);
4155
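	/* rate_quality maps the current rate linearly onto 60..100%: 1 Mbps
	 * against a 54 Mbps maximum gives 60%, and running at the maximum
	 * rate gives 100%, matching the table in the comment above. */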
4156 priv->last_rate = ipw_get_current_rate(priv);
4157 max_rate = ipw_get_max_rate(priv);
4158 rate_quality = priv->last_rate * 40 / max_rate + 60;
4159 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4160 rate_quality, priv->last_rate / 1000000);
4161
4162 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4163 rx_quality = 100 - (rx_err_delta * 100) /
4164 (rx_packets_delta + rx_err_delta);
4165 else
4166 rx_quality = 100;
4167 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4168 rx_quality, rx_err_delta, rx_packets_delta);
4169
4170 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4171 tx_quality = 100 - (tx_failures_delta * 100) /
4172 (tx_packets_delta + tx_failures_delta);
4173 else
4174 tx_quality = 100;
4175 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4176 tx_quality, tx_failures_delta, tx_packets_delta);
4177
4178 rssi = priv->exp_avg_rssi;
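	/* The expression below maps the averaged RSSI onto a percentage with
	 * a quadratic roll-off: it evaluates to 100 at perfect_rssi and falls
	 * off increasingly quickly as the RSSI approaches worst_rssi; the
	 * result is then clamped to the 0..100 range. */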
4179 signal_quality =
4180 (100 *
4181 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4182 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4183 (priv->ieee->perfect_rssi - rssi) *
4184 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4185 62 * (priv->ieee->perfect_rssi - rssi))) /
4186 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4187 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4188 if (signal_quality > 100)
4189 signal_quality = 100;
4190 else if (signal_quality < 1)
4191 signal_quality = 0;
4192
4193 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4194 signal_quality, rssi);
4195
4196 quality = min(beacon_quality,
4197 min(rate_quality,
4198 min(tx_quality, min(rx_quality, signal_quality))));
4199 if (quality == beacon_quality)
4200 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4201 quality);
4202 if (quality == rate_quality)
4203 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4204 quality);
4205 if (quality == tx_quality)
4206 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4207 quality);
4208 if (quality == rx_quality)
4209 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4210 quality);
4211 if (quality == signal_quality)
4212 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4213 quality);
4214
4215 priv->quality = quality;
4216
4217 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4218 IPW_STATS_INTERVAL);
4219 }
4220
4221 static void ipw_bg_gather_stats(void *data)
4222 {
4223 struct ipw_priv *priv = data;
4224 mutex_lock(&priv->mutex);
4225 ipw_gather_stats(data);
4226 mutex_unlock(&priv->mutex);
4227 }
4228
4229 /* Missed beacon behavior:
4230 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4231 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4232 * Above disassociate threshold, give up and stop scanning.
4233 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
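/* For example (illustrative values only -- the real limits come from
 * priv->roaming_threshold and priv->disassociate_threshold): with a roaming
 * threshold of 8 and a disassociate threshold of 24, missed counts 1-8 are
 * only logged, 9-24 trigger a scan/roam attempt, and anything above 24
 * disassociates. */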
4234 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4235 int missed_count)
4236 {
4237 priv->notif_missed_beacons = missed_count;
4238
4239 if (missed_count > priv->disassociate_threshold &&
4240 priv->status & STATUS_ASSOCIATED) {
4241 /* If associated and we've hit the missed
4242 * beacon threshold, disassociate, turn
4243 * off roaming, and abort any active scans */
4244 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4245 IPW_DL_STATE | IPW_DL_ASSOC,
4246 "Missed beacon: %d - disassociate\n", missed_count);
4247 priv->status &= ~STATUS_ROAMING;
4248 if (priv->status & STATUS_SCANNING) {
4249 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4250 IPW_DL_STATE,
4251 "Aborting scan with missed beacon.\n");
4252 queue_work(priv->workqueue, &priv->abort_scan);
4253 }
4254
4255 queue_work(priv->workqueue, &priv->disassociate);
4256 return;
4257 }
4258
4259 if (priv->status & STATUS_ROAMING) {
4260 /* If we are currently roaming, then just
4261 * print a debug statement... */
4262 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4263 "Missed beacon: %d - roam in progress\n",
4264 missed_count);
4265 return;
4266 }
4267
4268 if (roaming &&
4269 (missed_count > priv->roaming_threshold &&
4270 missed_count <= priv->disassociate_threshold)) {
4271 /* If we are not already roaming, set the ROAM
4272 * bit in the status and kick off a scan.
4273 * This can happen several times before we reach
4274 * disassociate_threshold. */
4275 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4276 "Missed beacon: %d - initiate "
4277 "roaming\n", missed_count);
4278 if (!(priv->status & STATUS_ROAMING)) {
4279 priv->status |= STATUS_ROAMING;
4280 if (!(priv->status & STATUS_SCANNING))
4281 queue_work(priv->workqueue,
4282 &priv->request_scan);
4283 }
4284 return;
4285 }
4286
4287 if (priv->status & STATUS_SCANNING) {
4288 /* Stop scan to keep fw from getting
4289 * stuck (only if we aren't roaming --
4290 * otherwise we'll never scan more than 2 or 3
4291 * channels..) */
4292 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4293 "Aborting scan with missed beacon.\n");
4294 queue_work(priv->workqueue, &priv->abort_scan);
4295 }
4296
4297 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4298 }
4299
4300 /**
4301 * Handle host notification packet.
4302 * Called from interrupt routine
4303 */
4304 static void ipw_rx_notification(struct ipw_priv *priv,
4305 struct ipw_rx_notification *notif)
4306 {
4307 notif->size = le16_to_cpu(notif->size);
4308
4309 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4310
4311 switch (notif->subtype) {
4312 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4313 struct notif_association *assoc = &notif->u.assoc;
4314
4315 switch (assoc->state) {
4316 case CMAS_ASSOCIATED:{
4317 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4318 IPW_DL_ASSOC,
4319 "associated: '%s' " MAC_FMT
4320 " \n",
4321 escape_essid(priv->essid,
4322 priv->essid_len),
4323 MAC_ARG(priv->bssid));
4324
4325 switch (priv->ieee->iw_mode) {
4326 case IW_MODE_INFRA:
4327 memcpy(priv->ieee->bssid,
4328 priv->bssid, ETH_ALEN);
4329 break;
4330
4331 case IW_MODE_ADHOC:
4332 memcpy(priv->ieee->bssid,
4333 priv->bssid, ETH_ALEN);
4334
4335 /* clear out the station table */
4336 priv->num_stations = 0;
4337
4338 IPW_DEBUG_ASSOC
4339 ("queueing adhoc check\n");
4340 queue_delayed_work(priv->
4341 workqueue,
4342 &priv->
4343 adhoc_check,
4344 priv->
4345 assoc_request.
4346 beacon_interval);
4347 break;
4348 }
4349
4350 priv->status &= ~STATUS_ASSOCIATING;
4351 priv->status |= STATUS_ASSOCIATED;
4352 queue_work(priv->workqueue,
4353 &priv->system_config);
4354
4355 #ifdef CONFIG_IPW2200_QOS
4356 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4357 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4358 if ((priv->status & STATUS_AUTH) &&
4359 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4360 == IEEE80211_STYPE_ASSOC_RESP)) {
4361 if ((sizeof
4362 (struct
4363 ieee80211_assoc_response)
4364 <= notif->size)
4365 && (notif->size <= 2314)) {
4366 struct
4367 ieee80211_rx_stats
4368 stats = {
4369 .len =
4370 notif->
4371 size - 1,
4372 };
4373
4374 IPW_DEBUG_QOS
4375 ("QoS Associate "
4376 "size %d\n",
4377 notif->size);
4378 ieee80211_rx_mgt(priv->
4379 ieee,
4380 (struct
4381 ieee80211_hdr_4addr
4382 *)
4383 &notif->u.raw, &stats);
4384 }
4385 }
4386 #endif
4387
4388 schedule_work(&priv->link_up);
4389
4390 break;
4391 }
4392
4393 case CMAS_AUTHENTICATED:{
4394 if (priv->
4395 status & (STATUS_ASSOCIATED |
4396 STATUS_AUTH)) {
4397 #ifdef CONFIG_IPW2200_DEBUG
4398 struct notif_authenticate *auth
4399 = &notif->u.auth;
4400 IPW_DEBUG(IPW_DL_NOTIF |
4401 IPW_DL_STATE |
4402 IPW_DL_ASSOC,
4403 "deauthenticated: '%s' "
4404 MAC_FMT
4405 ": (0x%04X) - %s \n",
4406 escape_essid(priv->
4407 essid,
4408 priv->
4409 essid_len),
4410 MAC_ARG(priv->bssid),
4411 ntohs(auth->status),
4412 ipw_get_status_code
4413 (ntohs
4414 (auth->status)));
4415 #endif
4416
4417 priv->status &=
4418 ~(STATUS_ASSOCIATING |
4419 STATUS_AUTH |
4420 STATUS_ASSOCIATED);
4421
4422 schedule_work(&priv->link_down);
4423 break;
4424 }
4425
4426 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4427 IPW_DL_ASSOC,
4428 "authenticated: '%s' " MAC_FMT
4429 "\n",
4430 escape_essid(priv->essid,
4431 priv->essid_len),
4432 MAC_ARG(priv->bssid));
4433 break;
4434 }
4435
4436 case CMAS_INIT:{
4437 if (priv->status & STATUS_AUTH) {
4438 struct
4439 ieee80211_assoc_response
4440 *resp;
4441 resp =
4442 (struct
4443 ieee80211_assoc_response
4444 *)&notif->u.raw;
4445 IPW_DEBUG(IPW_DL_NOTIF |
4446 IPW_DL_STATE |
4447 IPW_DL_ASSOC,
4448 "association failed (0x%04X): %s\n",
4449 ntohs(resp->status),
4450 ipw_get_status_code
4451 (ntohs
4452 (resp->status)));
4453 }
4454
4455 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4456 IPW_DL_ASSOC,
4457 "disassociated: '%s' " MAC_FMT
4458 " \n",
4459 escape_essid(priv->essid,
4460 priv->essid_len),
4461 MAC_ARG(priv->bssid));
4462
4463 priv->status &=
4464 ~(STATUS_DISASSOCIATING |
4465 STATUS_ASSOCIATING |
4466 STATUS_ASSOCIATED | STATUS_AUTH);
4467 if (priv->assoc_network
4468 && (priv->assoc_network->
4469 capability &
4470 WLAN_CAPABILITY_IBSS))
4471 ipw_remove_current_network
4472 (priv);
4473
4474 schedule_work(&priv->link_down);
4475
4476 break;
4477 }
4478
4479 case CMAS_RX_ASSOC_RESP:
4480 break;
4481
4482 default:
4483 IPW_ERROR("assoc: unknown (%d)\n",
4484 assoc->state);
4485 break;
4486 }
4487
4488 break;
4489 }
4490
4491 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4492 struct notif_authenticate *auth = &notif->u.auth;
4493 switch (auth->state) {
4494 case CMAS_AUTHENTICATED:
4495 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4496 "authenticated: '%s' " MAC_FMT " \n",
4497 escape_essid(priv->essid,
4498 priv->essid_len),
4499 MAC_ARG(priv->bssid));
4500 priv->status |= STATUS_AUTH;
4501 break;
4502
4503 case CMAS_INIT:
4504 if (priv->status & STATUS_AUTH) {
4505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4506 IPW_DL_ASSOC,
4507 "authentication failed (0x%04X): %s\n",
4508 ntohs(auth->status),
4509 ipw_get_status_code(ntohs
4510 (auth->
4511 status)));
4512 }
4513 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4514 IPW_DL_ASSOC,
4515 "deauthenticated: '%s' " MAC_FMT "\n",
4516 escape_essid(priv->essid,
4517 priv->essid_len),
4518 MAC_ARG(priv->bssid));
4519
4520 priv->status &= ~(STATUS_ASSOCIATING |
4521 STATUS_AUTH |
4522 STATUS_ASSOCIATED);
4523
4524 schedule_work(&priv->link_down);
4525 break;
4526
4527 case CMAS_TX_AUTH_SEQ_1:
4528 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4529 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4530 break;
4531 case CMAS_RX_AUTH_SEQ_2:
4532 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4533 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4534 break;
4535 case CMAS_AUTH_SEQ_1_PASS:
4536 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4537 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4538 break;
4539 case CMAS_AUTH_SEQ_1_FAIL:
4540 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4541 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4542 break;
4543 case CMAS_TX_AUTH_SEQ_3:
4544 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4546 break;
4547 case CMAS_RX_AUTH_SEQ_4:
4548 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4549 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4550 break;
4551 case CMAS_AUTH_SEQ_2_PASS:
4552 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4553 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4554 break;
4555 case CMAS_AUTH_SEQ_2_FAIL:
4556 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4557 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4558 break;
4559 case CMAS_TX_ASSOC:
4560 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4561 IPW_DL_ASSOC, "TX_ASSOC\n");
4562 break;
4563 case CMAS_RX_ASSOC_RESP:
4564 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4565 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4566
4567 break;
4568 case CMAS_ASSOCIATED:
4569 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4570 IPW_DL_ASSOC, "ASSOCIATED\n");
4571 break;
4572 default:
4573 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4574 auth->state);
4575 break;
4576 }
4577 break;
4578 }
4579
4580 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4581 struct notif_channel_result *x =
4582 &notif->u.channel_result;
4583
4584 if (notif->size == sizeof(*x)) {
4585 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4586 x->channel_num);
4587 } else {
4588 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4589 "(should be %zd)\n",
4590 notif->size, sizeof(*x));
4591 }
4592 break;
4593 }
4594
4595 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4596 struct notif_scan_complete *x = &notif->u.scan_complete;
4597 if (notif->size == sizeof(*x)) {
4598 IPW_DEBUG_SCAN
4599 ("Scan completed: type %d, %d channels, "
4600 "%d status\n", x->scan_type,
4601 x->num_channels, x->status);
4602 } else {
4603 IPW_ERROR("Scan completed of wrong size %d "
4604 "(should be %zd)\n",
4605 notif->size, sizeof(*x));
4606 }
4607
4608 priv->status &=
4609 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4610
4611 wake_up_interruptible(&priv->wait_state);
4612 cancel_delayed_work(&priv->scan_check);
4613
4614 if (priv->status & STATUS_EXIT_PENDING)
4615 break;
4616
4617 priv->ieee->scans++;
4618
4619 #ifdef CONFIG_IPW2200_MONITOR
4620 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4621 priv->status |= STATUS_SCAN_FORCED;
4622 queue_work(priv->workqueue,
4623 &priv->request_scan);
4624 break;
4625 }
4626 priv->status &= ~STATUS_SCAN_FORCED;
4627 #endif /* CONFIG_IPW2200_MONITOR */
4628
4629 if (!(priv->status & (STATUS_ASSOCIATED |
4630 STATUS_ASSOCIATING |
4631 STATUS_ROAMING |
4632 STATUS_DISASSOCIATING)))
4633 queue_work(priv->workqueue, &priv->associate);
4634 else if (priv->status & STATUS_ROAMING) {
4635 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4636 /* If a scan completed and we are in roam mode, then
4637 * the scan that completed was the one requested as a
4638 * result of entering roam... so, schedule the
4639 * roam work */
4640 queue_work(priv->workqueue,
4641 &priv->roam);
4642 else
4643 /* Don't schedule if we aborted the scan */
4644 priv->status &= ~STATUS_ROAMING;
4645 } else if (priv->status & STATUS_SCAN_PENDING)
4646 queue_work(priv->workqueue,
4647 &priv->request_scan);
4648 else if (priv->config & CFG_BACKGROUND_SCAN
4649 && priv->status & STATUS_ASSOCIATED)
4650 queue_delayed_work(priv->workqueue,
4651 &priv->request_scan, HZ);
4652
4653 /* Send an empty event to user space.
4654 * We don't send the received data on the event because
4655 * it would require us to do complex transcoding, and
4656 			 * we want to minimise the work done in the irq handler.
4657 			 * Use a request to extract the data.
4658 			 * Also, we generate this event for any scan, regardless
4659 			 * of how the scan was initiated. User space can just
4660 * sync on periodic scan to get fresh data...
4661 * Jean II */
4662 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4663 union iwreq_data wrqu;
4664
4665 wrqu.data.length = 0;
4666 wrqu.data.flags = 0;
4667 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4668 &wrqu, NULL);
4669 }
4670 break;
4671 }
4672
4673 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4674 struct notif_frag_length *x = &notif->u.frag_len;
4675
4676 if (notif->size == sizeof(*x))
4677 IPW_ERROR("Frag length: %d\n",
4678 le16_to_cpu(x->frag_length));
4679 else
4680 IPW_ERROR("Frag length of wrong size %d "
4681 "(should be %zd)\n",
4682 notif->size, sizeof(*x));
4683 break;
4684 }
4685
4686 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4687 struct notif_link_deterioration *x =
4688 &notif->u.link_deterioration;
4689
4690 if (notif->size == sizeof(*x)) {
4691 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4692 "link deterioration: type %d, cnt %d\n",
4693 x->silence_notification_type,
4694 x->silence_count);
4695 memcpy(&priv->last_link_deterioration, x,
4696 sizeof(*x));
4697 } else {
4698 IPW_ERROR("Link Deterioration of wrong size %d "
4699 "(should be %zd)\n",
4700 notif->size, sizeof(*x));
4701 }
4702 break;
4703 }
4704
4705 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4706 IPW_ERROR("Dino config\n");
4707 if (priv->hcmd
4708 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4709 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4710
4711 break;
4712 }
4713
4714 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4715 struct notif_beacon_state *x = &notif->u.beacon_state;
4716 if (notif->size != sizeof(*x)) {
4717 IPW_ERROR
4718 ("Beacon state of wrong size %d (should "
4719 "be %zd)\n", notif->size, sizeof(*x));
4720 break;
4721 }
4722
4723 if (le32_to_cpu(x->state) ==
4724 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4725 ipw_handle_missed_beacon(priv,
4726 le32_to_cpu(x->
4727 number));
4728
4729 break;
4730 }
4731
4732 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4733 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4734 if (notif->size == sizeof(*x)) {
4735 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4736 "0x%02x station %d\n",
4737 x->key_state, x->security_type,
4738 x->station_index);
4739 break;
4740 }
4741
4742 IPW_ERROR
4743 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4744 notif->size, sizeof(*x));
4745 break;
4746 }
4747
4748 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4749 struct notif_calibration *x = &notif->u.calibration;
4750
4751 if (notif->size == sizeof(*x)) {
4752 memcpy(&priv->calib, x, sizeof(*x));
4753 IPW_DEBUG_INFO("TODO: Calibration\n");
4754 break;
4755 }
4756
4757 IPW_ERROR
4758 ("Calibration of wrong size %d (should be %zd)\n",
4759 notif->size, sizeof(*x));
4760 break;
4761 }
4762
4763 case HOST_NOTIFICATION_NOISE_STATS:{
4764 if (notif->size == sizeof(u32)) {
4765 priv->exp_avg_noise =
4766 exponential_average(priv->exp_avg_noise,
4767 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4768 DEPTH_NOISE);
4769 break;
4770 }
4771
4772 IPW_ERROR
4773 ("Noise stat is wrong size %d (should be %zd)\n",
4774 notif->size, sizeof(u32));
4775 break;
4776 }
4777
4778 default:
4779 IPW_DEBUG_NOTIF("Unknown notification: "
4780 "subtype=%d,flags=0x%2x,size=%d\n",
4781 notif->subtype, notif->flags, notif->size);
4782 }
4783 }
4784
4785 /**
4786  * Destroys all DMA structures and initialises them again
4787 *
4788 * @param priv
4789 * @return error code
4790 */
4791 static int ipw_queue_reset(struct ipw_priv *priv)
4792 {
4793 int rc = 0;
4794 /** @todo customize queue sizes */
4795 int nTx = 64, nTxCmd = 8;
4796 ipw_tx_queue_free(priv);
4797 /* Tx CMD queue */
4798 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4799 IPW_TX_CMD_QUEUE_READ_INDEX,
4800 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4801 IPW_TX_CMD_QUEUE_BD_BASE,
4802 IPW_TX_CMD_QUEUE_BD_SIZE);
4803 if (rc) {
4804 IPW_ERROR("Tx Cmd queue init failed\n");
4805 goto error;
4806 }
4807 /* Tx queue(s) */
4808 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4809 IPW_TX_QUEUE_0_READ_INDEX,
4810 IPW_TX_QUEUE_0_WRITE_INDEX,
4811 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4812 if (rc) {
4813 IPW_ERROR("Tx 0 queue init failed\n");
4814 goto error;
4815 }
4816 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4817 IPW_TX_QUEUE_1_READ_INDEX,
4818 IPW_TX_QUEUE_1_WRITE_INDEX,
4819 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4820 if (rc) {
4821 IPW_ERROR("Tx 1 queue init failed\n");
4822 goto error;
4823 }
4824 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4825 IPW_TX_QUEUE_2_READ_INDEX,
4826 IPW_TX_QUEUE_2_WRITE_INDEX,
4827 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4828 if (rc) {
4829 IPW_ERROR("Tx 2 queue init failed\n");
4830 goto error;
4831 }
4832 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4833 IPW_TX_QUEUE_3_READ_INDEX,
4834 IPW_TX_QUEUE_3_WRITE_INDEX,
4835 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4836 if (rc) {
4837 IPW_ERROR("Tx 3 queue init failed\n");
4838 goto error;
4839 }
4840 /* statistics */
4841 priv->rx_bufs_min = 0;
4842 priv->rx_pend_max = 0;
4843 return rc;
4844
4845 error:
4846 ipw_tx_queue_free(priv);
4847 return rc;
4848 }
4849
4850 /**
4851  * Reclaim Tx queue entries no longer used by the NIC.
4852  *
4853  * When the firmware advances the 'R' index, all entries between the old
4854  * and new 'R' index need to be reclaimed. As a result, some free space
4855  * forms. If there is enough free space (> low mark), wake the Tx queue.
4856 *
4857 * @note Need to protect against garbage in 'R' index
4858 * @param priv
4859 * @param txq
4860 * @param qindex
4861  * @return Number of used entries remaining in the queue
4862 */
4863 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4864 struct clx2_tx_queue *txq, int qindex)
4865 {
4866 u32 hw_tail;
4867 int used;
4868 struct clx2_queue *q = &txq->q;
4869
4870 hw_tail = ipw_read32(priv, q->reg_r);
4871 if (hw_tail >= q->n_bd) {
4872 IPW_ERROR
4873 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4874 hw_tail, q->n_bd);
4875 goto done;
4876 }
4877 for (; q->last_used != hw_tail;
4878 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4879 ipw_queue_tx_free_tfd(priv, txq);
4880 priv->tx_packets++;
4881 }
4882 done:
4883 if ((ipw_queue_space(q) > q->low_mark) &&
4884 (qindex >= 0) &&
4885 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4886 netif_wake_queue(priv->net_dev);
4887 used = q->first_empty - q->last_used;
4888 if (used < 0)
4889 used += q->n_bd;
4890
4891 return used;
4892 }
4893
4894 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4895 int len, int sync)
4896 {
4897 struct clx2_tx_queue *txq = &priv->txq_cmd;
4898 struct clx2_queue *q = &txq->q;
4899 struct tfd_frame *tfd;
4900
4901 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4902 IPW_ERROR("No space for Tx\n");
4903 return -EBUSY;
4904 }
4905
4906 tfd = &txq->bd[q->first_empty];
4907 txq->txb[q->first_empty] = NULL;
4908
4909 memset(tfd, 0, sizeof(*tfd));
4910 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4911 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4912 priv->hcmd_seq++;
4913 tfd->u.cmd.index = hcmd;
4914 tfd->u.cmd.length = len;
4915 memcpy(tfd->u.cmd.payload, buf, len);
4916 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4917 ipw_write32(priv, q->reg_w, q->first_empty);
4918 _ipw_read32(priv, 0x90);
4919
4920 return 0;
4921 }
4922
4923 /*
4924 * Rx theory of operation
4925 *
4926 * The host allocates 32 DMA target addresses and passes the host address
4927 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4928 * 0 to 31
4929 *
4930 * Rx Queue Indexes
4931 * The host/firmware share two index registers for managing the Rx buffers.
4932 *
4933 * The READ index maps to the first position that the firmware may be writing
4934 * to -- the driver can read up to (but not including) this position and get
4935 * good data.
4936 * The READ index is managed by the firmware once the card is enabled.
4937 *
4938 * The WRITE index maps to the last position the driver has read from -- the
4939  * position preceding WRITE is the last slot in which the firmware can place a packet.
4940 *
4941 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4942 * WRITE = READ.
4943 *
4944 * During initialization the host sets up the READ queue position to the first
4945 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4946 *
4947 * When the firmware places a packet in a buffer it will advance the READ index
4948 * and fire the RX interrupt. The driver can then query the READ index and
4949 * process as many packets as possible, moving the WRITE index forward as it
4950 * resets the Rx queue buffers with new memory.
4951 *
4952 * The management in the driver is as follows:
4953 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4954 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4955  * to replenish the ipw->rxq->rx_free.
4956 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4957 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4958 * 'processed' and 'read' driver indexes as well)
4959 * + A received packet is processed and handed to the kernel network stack,
4960 * detached from the ipw->rxq. The driver 'processed' index is updated.
4961 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4962 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4963 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4964 * were enough free buffers and RX_STALLED is set it is cleared.
4965 *
4966 *
4967 * Driver sequence:
4968 *
4969 * ipw_rx_queue_alloc() Allocates rx_free
4970 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4971 * ipw_rx_queue_restock
4972 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4973 * queue, updates firmware pointers, and updates
4974 * the WRITE index. If insufficient rx_free buffers
4975 * are available, schedules ipw_rx_queue_replenish
4976 *
4977 * -- enable interrupts --
4978 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4979 * READ INDEX, detaching the SKB from the pool.
4980 * Moves the packet buffer from queue to rx_used.
4981 * Calls ipw_rx_queue_restock to refill any empty
4982 * slots.
4983 * ...
4984 *
4985 */
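/* Illustrative sketch (not part of the driver): given the READ/WRITE
 * convention described above, the number of slots holding good data that the
 * driver may still process can be computed as follows.  The helper name is
 * hypothetical and is shown for explanation only. */
#if 0
static inline u32 ipw_rx_slots_with_data(u32 read, u32 write)
{
	/* Slots after WRITE and before READ hold received packets:
	 * WRITE == READ - 1  ->  0 slots (empty),
	 * WRITE == READ      ->  RX_QUEUE_SIZE - 1 slots (full). */
	return (read - write - 1 + RX_QUEUE_SIZE) % RX_QUEUE_SIZE;
}
#endif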
4986
4987 /*
4988 * If there are slots in the RX queue that need to be restocked,
4989 * and we have free pre-allocated buffers, fill the ranks as much
4990 * as we can pulling from rx_free.
4991 *
4992 * This moves the 'write' index forward to catch up with 'processed', and
4993 * also updates the memory address in the firmware to reference the new
4994 * target buffer.
4995 */
4996 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4997 {
4998 struct ipw_rx_queue *rxq = priv->rxq;
4999 struct list_head *element;
5000 struct ipw_rx_mem_buffer *rxb;
5001 unsigned long flags;
5002 int write;
5003
5004 spin_lock_irqsave(&rxq->lock, flags);
5005 write = rxq->write;
5006 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5007 element = rxq->rx_free.next;
5008 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5009 list_del(element);
5010
5011 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5012 rxb->dma_addr);
5013 rxq->queue[rxq->write] = rxb;
5014 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5015 rxq->free_count--;
5016 }
5017 spin_unlock_irqrestore(&rxq->lock, flags);
5018
5019 /* If the pre-allocated buffer pool is dropping low, schedule to
5020 * refill it */
5021 if (rxq->free_count <= RX_LOW_WATERMARK)
5022 queue_work(priv->workqueue, &priv->rx_replenish);
5023
5024 /* If we've added more space for the firmware to place data, tell it */
5025 if (write != rxq->write)
5026 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5027 }
5028
5029 /*
5030  * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5031 * Also restock the Rx queue via ipw_rx_queue_restock.
5032 *
5033  * This is called as a scheduled work item (except during initialization)
5034 */
5035 static void ipw_rx_queue_replenish(void *data)
5036 {
5037 struct ipw_priv *priv = data;
5038 struct ipw_rx_queue *rxq = priv->rxq;
5039 struct list_head *element;
5040 struct ipw_rx_mem_buffer *rxb;
5041 unsigned long flags;
5042
5043 spin_lock_irqsave(&rxq->lock, flags);
5044 while (!list_empty(&rxq->rx_used)) {
5045 element = rxq->rx_used.next;
5046 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5047 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5048 if (!rxb->skb) {
5049 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5050 priv->net_dev->name);
5051 /* We don't reschedule replenish work here -- we will
5052 * call the restock method and if it still needs
5053 * more buffers it will schedule replenish */
5054 break;
5055 }
5056 list_del(element);
5057
5058 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
5059 rxb->dma_addr =
5060 pci_map_single(priv->pci_dev, rxb->skb->data,
5061 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5062
5063 list_add_tail(&rxb->list, &rxq->rx_free);
5064 rxq->free_count++;
5065 }
5066 spin_unlock_irqrestore(&rxq->lock, flags);
5067
5068 ipw_rx_queue_restock(priv);
5069 }
5070
5071 static void ipw_bg_rx_queue_replenish(void *data)
5072 {
5073 struct ipw_priv *priv = data;
5074 mutex_lock(&priv->mutex);
5075 ipw_rx_queue_replenish(data);
5076 mutex_unlock(&priv->mutex);
5077 }
5078
5079 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5080  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5081  * This free routine walks the list of POOL entries and, if the SKB is
5082  * non-NULL, it is unmapped and freed.
5083 */
5084 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5085 {
5086 int i;
5087
5088 if (!rxq)
5089 return;
5090
5091 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5092 if (rxq->pool[i].skb != NULL) {
5093 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5094 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5095 dev_kfree_skb(rxq->pool[i].skb);
5096 }
5097 }
5098
5099 kfree(rxq);
5100 }
5101
5102 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5103 {
5104 struct ipw_rx_queue *rxq;
5105 int i;
5106
5107 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5108 if (unlikely(!rxq)) {
5109 IPW_ERROR("memory allocation failed\n");
5110 return NULL;
5111 }
5112 spin_lock_init(&rxq->lock);
5113 INIT_LIST_HEAD(&rxq->rx_free);
5114 INIT_LIST_HEAD(&rxq->rx_used);
5115
5116 /* Fill the rx_used queue with _all_ of the Rx buffers */
5117 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5118 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5119
5120 /* Set us so that we have processed and used all buffers, but have
5121 * not restocked the Rx queue with fresh buffers */
5122 rxq->read = rxq->write = 0;
5123 rxq->processed = RX_QUEUE_SIZE - 1;
5124 rxq->free_count = 0;
5125
5126 return rxq;
5127 }
5128
5129 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5130 {
5131 rate &= ~IEEE80211_BASIC_RATE_MASK;
5132 if (ieee_mode == IEEE_A) {
5133 switch (rate) {
5134 case IEEE80211_OFDM_RATE_6MB:
5135 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5136 1 : 0;
5137 case IEEE80211_OFDM_RATE_9MB:
5138 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5139 1 : 0;
5140 case IEEE80211_OFDM_RATE_12MB:
5141 			return priv->rates_mask &
5142 			    IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5143 		case IEEE80211_OFDM_RATE_18MB:
5144 			return priv->rates_mask &
5145 			    IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5146 		case IEEE80211_OFDM_RATE_24MB:
5147 			return priv->rates_mask &
5148 			    IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5149 		case IEEE80211_OFDM_RATE_36MB:
5150 			return priv->rates_mask &
5151 			    IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5152 		case IEEE80211_OFDM_RATE_48MB:
5153 			return priv->rates_mask &
5154 			    IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5155 		case IEEE80211_OFDM_RATE_54MB:
5156 			return priv->rates_mask &
5157 			    IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5158 default:
5159 return 0;
5160 }
5161 }
5162
5163 /* B and G mixed */
5164 switch (rate) {
5165 case IEEE80211_CCK_RATE_1MB:
5166 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5167 case IEEE80211_CCK_RATE_2MB:
5168 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5169 case IEEE80211_CCK_RATE_5MB:
5170 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5171 case IEEE80211_CCK_RATE_11MB:
5172 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5173 }
5174
5175 /* If we are limited to B modulations, bail at this point */
5176 if (ieee_mode == IEEE_B)
5177 return 0;
5178
5179 /* G */
5180 switch (rate) {
5181 case IEEE80211_OFDM_RATE_6MB:
5182 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5183 case IEEE80211_OFDM_RATE_9MB:
5184 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5185 case IEEE80211_OFDM_RATE_12MB:
5186 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5187 case IEEE80211_OFDM_RATE_18MB:
5188 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5189 case IEEE80211_OFDM_RATE_24MB:
5190 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5191 case IEEE80211_OFDM_RATE_36MB:
5192 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5193 case IEEE80211_OFDM_RATE_48MB:
5194 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5195 case IEEE80211_OFDM_RATE_54MB:
5196 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5197 }
5198
5199 return 0;
5200 }
5201
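/* Build the intersection of the network's advertised rates and the user's
 * rates_mask.  Rates the AP flags as basic (mandatory) are kept even when
 * the mask excludes them, since association cannot succeed without them. */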
5202 static int ipw_compatible_rates(struct ipw_priv *priv,
5203 const struct ieee80211_network *network,
5204 struct ipw_supported_rates *rates)
5205 {
5206 int num_rates, i;
5207
5208 memset(rates, 0, sizeof(*rates));
5209 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5210 rates->num_rates = 0;
5211 for (i = 0; i < num_rates; i++) {
5212 if (!ipw_is_rate_in_mask(priv, network->mode,
5213 network->rates[i])) {
5214
5215 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5216 IPW_DEBUG_SCAN("Adding masked mandatory "
5217 "rate %02X\n",
5218 network->rates[i]);
5219 rates->supported_rates[rates->num_rates++] =
5220 network->rates[i];
5221 continue;
5222 }
5223
5224 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5225 network->rates[i], priv->rates_mask);
5226 continue;
5227 }
5228
5229 rates->supported_rates[rates->num_rates++] = network->rates[i];
5230 }
5231
5232 num_rates = min(network->rates_ex_len,
5233 (u8) (IPW_MAX_RATES - num_rates));
5234 for (i = 0; i < num_rates; i++) {
5235 if (!ipw_is_rate_in_mask(priv, network->mode,
5236 network->rates_ex[i])) {
5237 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5238 IPW_DEBUG_SCAN("Adding masked mandatory "
5239 "rate %02X\n",
5240 network->rates_ex[i]);
5241 rates->supported_rates[rates->num_rates++] =
5242 			    network->rates_ex[i];
5243 continue;
5244 }
5245
5246 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5247 network->rates_ex[i], priv->rates_mask);
5248 continue;
5249 }
5250
5251 rates->supported_rates[rates->num_rates++] =
5252 network->rates_ex[i];
5253 }
5254
5255 return 1;
5256 }
5257
5258 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5259 const struct ipw_supported_rates *src)
5260 {
5261 u8 i;
5262 for (i = 0; i < src->num_rates; i++)
5263 dest->supported_rates[i] = src->supported_rates[i];
5264 dest->num_rates = src->num_rates;
5265 }
5266
5267 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5268 * mask should ever be used -- right now all callers to add the scan rates are
5269 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5270 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5271 u8 modulation, u32 rate_mask)
5272 {
5273 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5274 IEEE80211_BASIC_RATE_MASK : 0;
5275
5276 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5277 rates->supported_rates[rates->num_rates++] =
5278 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5279
5280 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5281 rates->supported_rates[rates->num_rates++] =
5282 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5283
5284 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5285 rates->supported_rates[rates->num_rates++] = basic_mask |
5286 IEEE80211_CCK_RATE_5MB;
5287
5288 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5289 rates->supported_rates[rates->num_rates++] = basic_mask |
5290 IEEE80211_CCK_RATE_11MB;
5291 }
5292
5293 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5294 u8 modulation, u32 rate_mask)
5295 {
5296 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5297 IEEE80211_BASIC_RATE_MASK : 0;
5298
5299 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5300 rates->supported_rates[rates->num_rates++] = basic_mask |
5301 IEEE80211_OFDM_RATE_6MB;
5302
5303 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5304 rates->supported_rates[rates->num_rates++] =
5305 IEEE80211_OFDM_RATE_9MB;
5306
5307 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5308 rates->supported_rates[rates->num_rates++] = basic_mask |
5309 IEEE80211_OFDM_RATE_12MB;
5310
5311 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5312 rates->supported_rates[rates->num_rates++] =
5313 IEEE80211_OFDM_RATE_18MB;
5314
5315 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5316 rates->supported_rates[rates->num_rates++] = basic_mask |
5317 IEEE80211_OFDM_RATE_24MB;
5318
5319 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5320 rates->supported_rates[rates->num_rates++] =
5321 IEEE80211_OFDM_RATE_36MB;
5322
5323 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5324 rates->supported_rates[rates->num_rates++] =
5325 IEEE80211_OFDM_RATE_48MB;
5326
5327 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5328 rates->supported_rates[rates->num_rates++] =
5329 IEEE80211_OFDM_RATE_54MB;
5330 }
5331
5332 struct ipw_network_match {
5333 struct ieee80211_network *network;
5334 struct ipw_supported_rates rates;
5335 };
5336
5337 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5338 struct ipw_network_match *match,
5339 struct ieee80211_network *network,
5340 int roaming)
5341 {
5342 struct ipw_supported_rates rates;
5343
5344 /* Verify that this network's capability is compatible with the
5345 * current mode (AdHoc or Infrastructure) */
5346 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5347 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5348 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5349 "capability mismatch.\n",
5350 escape_essid(network->ssid, network->ssid_len),
5351 MAC_ARG(network->bssid));
5352 return 0;
5353 }
5354
5355 /* If we do not have an ESSID for this AP, we can not associate with
5356 * it */
5357 if (network->flags & NETWORK_EMPTY_ESSID) {
5358 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5359 "because of hidden ESSID.\n",
5360 escape_essid(network->ssid, network->ssid_len),
5361 MAC_ARG(network->bssid));
5362 return 0;
5363 }
5364
5365 if (unlikely(roaming)) {
5366 		/* If we are roaming, then check whether this is a valid
5367 		 * network to try to roam to */
5368 if ((network->ssid_len != match->network->ssid_len) ||
5369 memcmp(network->ssid, match->network->ssid,
5370 network->ssid_len)) {
5371 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5372 "because of non-network ESSID.\n",
5373 escape_essid(network->ssid,
5374 network->ssid_len),
5375 MAC_ARG(network->bssid));
5376 return 0;
5377 }
5378 } else {
5379 /* If an ESSID has been configured then compare the broadcast
5380 * ESSID to ours */
5381 if ((priv->config & CFG_STATIC_ESSID) &&
5382 ((network->ssid_len != priv->essid_len) ||
5383 memcmp(network->ssid, priv->essid,
5384 min(network->ssid_len, priv->essid_len)))) {
5385 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5386
5387 strncpy(escaped,
5388 escape_essid(network->ssid, network->ssid_len),
5389 sizeof(escaped));
5390 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5391 "because of ESSID mismatch: '%s'.\n",
5392 escaped, MAC_ARG(network->bssid),
5393 escape_essid(priv->essid,
5394 priv->essid_len));
5395 return 0;
5396 }
5397 }
5398
5399 	/* For adhoc merging, the network with the later TSF timestamp wins;
5400 	 * if this one is behind the current match, skip the remaining checks. */
5401
5402 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5403 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5404 "current network.\n",
5405 escape_essid(match->network->ssid,
5406 match->network->ssid_len));
5407 return 0;
5408 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5409 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5410 "current network.\n",
5411 escape_essid(match->network->ssid,
5412 match->network->ssid_len));
5413 return 0;
5414 }
5415
5416 /* Now go through and see if the requested network is valid... */
5417 if (priv->ieee->scan_age != 0 &&
5418 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5419 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5420 "because of age: %ums.\n",
5421 escape_essid(network->ssid, network->ssid_len),
5422 MAC_ARG(network->bssid),
5423 jiffies_to_msecs(jiffies -
5424 network->last_scanned));
5425 return 0;
5426 }
5427
5428 if ((priv->config & CFG_STATIC_CHANNEL) &&
5429 (network->channel != priv->channel)) {
5430 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5431 "because of channel mismatch: %d != %d.\n",
5432 escape_essid(network->ssid, network->ssid_len),
5433 MAC_ARG(network->bssid),
5434 network->channel, priv->channel);
5435 return 0;
5436 }
5437
5438 	/* Verify privacy compatibility */
5439 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5440 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5441 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5442 "because of privacy mismatch: %s != %s.\n",
5443 escape_essid(network->ssid, network->ssid_len),
5444 MAC_ARG(network->bssid),
5445 priv->
5446 capability & CAP_PRIVACY_ON ? "on" : "off",
5447 network->
5448 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5449 "off");
5450 return 0;
5451 }
5452
5453 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5454 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5455 "because of the same BSSID match: " MAC_FMT
5456 ".\n", escape_essid(network->ssid,
5457 network->ssid_len),
5458 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5459 return 0;
5460 }
5461
5462 /* Filter out any incompatible freq / mode combinations */
5463 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5464 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5465 "because of invalid frequency/mode "
5466 "combination.\n",
5467 escape_essid(network->ssid, network->ssid_len),
5468 MAC_ARG(network->bssid));
5469 return 0;
5470 }
5471
5472 /* Ensure that the rates supported by the driver are compatible with
5473 * this AP, including verification of basic rates (mandatory) */
5474 if (!ipw_compatible_rates(priv, network, &rates)) {
5475 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5476 "because configured rate mask excludes "
5477 "AP mandatory rate.\n",
5478 escape_essid(network->ssid, network->ssid_len),
5479 MAC_ARG(network->bssid));
5480 return 0;
5481 }
5482
5483 if (rates.num_rates == 0) {
5484 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5485 "because of no compatible rates.\n",
5486 escape_essid(network->ssid, network->ssid_len),
5487 MAC_ARG(network->bssid));
5488 return 0;
5489 }
5490
5491 	/* TODO: Perform any further minimal comparative tests. We do not
5492 * want to put too much policy logic here; intelligent scan selection
5493 * should occur within a generic IEEE 802.11 user space tool. */
5494
5495 /* Set up 'new' AP to this network */
5496 ipw_copy_rates(&match->rates, &rates);
5497 match->network = network;
5498 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5499 escape_essid(network->ssid, network->ssid_len),
5500 MAC_ARG(network->bssid));
5501
5502 return 1;
5503 }
5504
5505 static void ipw_merge_adhoc_network(void *data)
5506 {
5507 struct ipw_priv *priv = data;
5508 struct ieee80211_network *network = NULL;
5509 struct ipw_network_match match = {
5510 .network = priv->assoc_network
5511 };
5512
5513 if ((priv->status & STATUS_ASSOCIATED) &&
5514 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5515 /* First pass through ROAM process -- look for a better
5516 * network */
5517 unsigned long flags;
5518
5519 spin_lock_irqsave(&priv->ieee->lock, flags);
5520 list_for_each_entry(network, &priv->ieee->network_list, list) {
5521 if (network != priv->assoc_network)
5522 ipw_find_adhoc_network(priv, &match, network,
5523 1);
5524 }
5525 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5526
5527 if (match.network == priv->assoc_network) {
5528 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5529 "merge to.\n");
5530 return;
5531 }
5532
5533 mutex_lock(&priv->mutex);
5534 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5535 IPW_DEBUG_MERGE("remove network %s\n",
5536 escape_essid(priv->essid,
5537 priv->essid_len));
5538 ipw_remove_current_network(priv);
5539 }
5540
5541 ipw_disassociate(priv);
5542 priv->assoc_network = match.network;
5543 mutex_unlock(&priv->mutex);
5544 return;
5545 }
5546 }
5547
5548 static int ipw_best_network(struct ipw_priv *priv,
5549 struct ipw_network_match *match,
5550 struct ieee80211_network *network, int roaming)
5551 {
5552 struct ipw_supported_rates rates;
5553
5554 /* Verify that this network's capability is compatible with the
5555 * current mode (AdHoc or Infrastructure) */
5556 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5557 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5558 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5559 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5560 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5561 "capability mismatch.\n",
5562 escape_essid(network->ssid, network->ssid_len),
5563 MAC_ARG(network->bssid));
5564 return 0;
5565 }
5566
5567 /* If we do not have an ESSID for this AP, we can not associate with
5568 * it */
5569 if (network->flags & NETWORK_EMPTY_ESSID) {
5570 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5571 "because of hidden ESSID.\n",
5572 escape_essid(network->ssid, network->ssid_len),
5573 MAC_ARG(network->bssid));
5574 return 0;
5575 }
5576
5577 if (unlikely(roaming)) {
5578 		/* If we are roaming, then check whether this is a valid
5579 		 * network to try to roam to */
5580 if ((network->ssid_len != match->network->ssid_len) ||
5581 memcmp(network->ssid, match->network->ssid,
5582 network->ssid_len)) {
5583 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5584 "because of non-network ESSID.\n",
5585 escape_essid(network->ssid,
5586 network->ssid_len),
5587 MAC_ARG(network->bssid));
5588 return 0;
5589 }
5590 } else {
5591 /* If an ESSID has been configured then compare the broadcast
5592 * ESSID to ours */
5593 if ((priv->config & CFG_STATIC_ESSID) &&
5594 ((network->ssid_len != priv->essid_len) ||
5595 memcmp(network->ssid, priv->essid,
5596 min(network->ssid_len, priv->essid_len)))) {
5597 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5598 strncpy(escaped,
5599 escape_essid(network->ssid, network->ssid_len),
5600 sizeof(escaped));
5601 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5602 "because of ESSID mismatch: '%s'.\n",
5603 escaped, MAC_ARG(network->bssid),
5604 escape_essid(priv->essid,
5605 priv->essid_len));
5606 return 0;
5607 }
5608 }
5609
5610 	/* If the current match has a stronger signal than this network,
5611 	 * don't bother testing everything else. */
5612 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5613 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5614 strncpy(escaped,
5615 escape_essid(network->ssid, network->ssid_len),
5616 sizeof(escaped));
5617 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5618 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5619 escaped, MAC_ARG(network->bssid),
5620 escape_essid(match->network->ssid,
5621 match->network->ssid_len),
5622 MAC_ARG(match->network->bssid));
5623 return 0;
5624 }
5625
5626 /* If this network has already had an association attempt within the
5627 * last 3 seconds, do not try and associate again... */
5628 if (network->last_associate &&
5629 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5630 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5631 "because of storming (%ums since last "
5632 "assoc attempt).\n",
5633 escape_essid(network->ssid, network->ssid_len),
5634 MAC_ARG(network->bssid),
5635 jiffies_to_msecs(jiffies -
5636 network->last_associate));
5637 return 0;
5638 }
5639
5640 /* Now go through and see if the requested network is valid... */
5641 if (priv->ieee->scan_age != 0 &&
5642 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5643 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5644 "because of age: %ums.\n",
5645 escape_essid(network->ssid, network->ssid_len),
5646 MAC_ARG(network->bssid),
5647 jiffies_to_msecs(jiffies -
5648 network->last_scanned));
5649 return 0;
5650 }
5651
5652 if ((priv->config & CFG_STATIC_CHANNEL) &&
5653 (network->channel != priv->channel)) {
5654 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5655 "because of channel mismatch: %d != %d.\n",
5656 escape_essid(network->ssid, network->ssid_len),
5657 MAC_ARG(network->bssid),
5658 network->channel, priv->channel);
5659 return 0;
5660 }
5661
5662 /* Verify privacy compatibility */
5663 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5664 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5665 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5666 "because of privacy mismatch: %s != %s.\n",
5667 escape_essid(network->ssid, network->ssid_len),
5668 MAC_ARG(network->bssid),
5669 priv->capability & CAP_PRIVACY_ON ? "on" :
5670 "off",
5671 network->capability &
5672 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5673 return 0;
5674 }
5675
5676 if ((priv->config & CFG_STATIC_BSSID) &&
5677 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5678 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5679 "because of BSSID mismatch: " MAC_FMT ".\n",
5680 escape_essid(network->ssid, network->ssid_len),
5681 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5682 return 0;
5683 }
5684
5685 /* Filter out any incompatible freq / mode combinations */
5686 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5687 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5688 "because of invalid frequency/mode "
5689 "combination.\n",
5690 escape_essid(network->ssid, network->ssid_len),
5691 MAC_ARG(network->bssid));
5692 return 0;
5693 }
5694
5695 /* Filter out invalid channel in current GEO */
5696 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5697 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5698 "because of invalid channel in current GEO\n",
5699 escape_essid(network->ssid, network->ssid_len),
5700 MAC_ARG(network->bssid));
5701 return 0;
5702 }
5703
5704 /* Ensure that the rates supported by the driver are compatible with
5705 * this AP, including verification of basic rates (mandatory) */
5706 if (!ipw_compatible_rates(priv, network, &rates)) {
5707 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5708 "because configured rate mask excludes "
5709 "AP mandatory rate.\n",
5710 escape_essid(network->ssid, network->ssid_len),
5711 MAC_ARG(network->bssid));
5712 return 0;
5713 }
5714
5715 if (rates.num_rates == 0) {
5716 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5717 "because of no compatible rates.\n",
5718 escape_essid(network->ssid, network->ssid_len),
5719 MAC_ARG(network->bssid));
5720 return 0;
5721 }
5722
5723 /* TODO: Perform any further minimal comparative tests. We do not
5724 * want to put too much policy logic here; intelligent scan selection
5725 * should occur within a generic IEEE 802.11 user space tool. */
5726
5727 /* Set up 'new' AP to this network */
5728 ipw_copy_rates(&match->rates, &rates);
5729 match->network = network;
5730
5731 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5732 escape_essid(network->ssid, network->ssid_len),
5733 MAC_ARG(network->bssid));
5734
5735 return 1;
5736 }
5737
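/*
 * Construct a new IBSS cell description from the current configuration:
 * pick the band/mode for the configured channel (falling back to the
 * first usable channel of a supported band if the configured one is
 * invalid or passive-only), then fill in the SSID, capability and rate
 * information for the network we are about to create.
 */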
5738 static void ipw_adhoc_create(struct ipw_priv *priv,
5739 struct ieee80211_network *network)
5740 {
5741 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5742 int i;
5743
5744 /*
5745 * For the purposes of scanning, we can set our wireless mode
5746 * to trigger scans across combinations of bands, but when it
5747 * comes to creating a new ad-hoc network, we have to tell the FW
5748 * exactly which band to use.
5749 *
5750 * We also have the possibility of an invalid channel for the
5751 * chosen band. Attempting to create a new ad-hoc network
5752 * with an invalid channel for wireless mode will trigger a
5753 * FW fatal error.
5754 *
5755 */
5756 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5757 case IEEE80211_52GHZ_BAND:
5758 network->mode = IEEE_A;
5759 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5760 BUG_ON(i == -1);
5761 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5762 IPW_WARNING("Overriding invalid channel\n");
5763 priv->channel = geo->a[0].channel;
5764 }
5765 break;
5766
5767 case IEEE80211_24GHZ_BAND:
5768 if (priv->ieee->mode & IEEE_G)
5769 network->mode = IEEE_G;
5770 else
5771 network->mode = IEEE_B;
5772 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5773 BUG_ON(i == -1);
5774 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5775 IPW_WARNING("Overriding invalid channel\n");
5776 priv->channel = geo->bg[0].channel;
5777 }
5778 break;
5779
5780 default:
5781 IPW_WARNING("Overriding invalid channel\n");
5782 if (priv->ieee->mode & IEEE_A) {
5783 network->mode = IEEE_A;
5784 priv->channel = geo->a[0].channel;
5785 } else if (priv->ieee->mode & IEEE_G) {
5786 network->mode = IEEE_G;
5787 priv->channel = geo->bg[0].channel;
5788 } else {
5789 network->mode = IEEE_B;
5790 priv->channel = geo->bg[0].channel;
5791 }
5792 break;
5793 }
5794
5795 network->channel = priv->channel;
5796 priv->config |= CFG_ADHOC_PERSIST;
5797 ipw_create_bssid(priv, network->bssid);
5798 network->ssid_len = priv->essid_len;
5799 memcpy(network->ssid, priv->essid, priv->essid_len);
5800 memset(&network->stats, 0, sizeof(network->stats));
5801 network->capability = WLAN_CAPABILITY_IBSS;
5802 if (!(priv->config & CFG_PREAMBLE_LONG))
5803 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5804 if (priv->capability & CAP_PRIVACY_ON)
5805 network->capability |= WLAN_CAPABILITY_PRIVACY;
5806 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5807 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5808 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5809 memcpy(network->rates_ex,
5810 &priv->rates.supported_rates[network->rates_len],
5811 network->rates_ex_len);
5812 network->last_scanned = 0;
5813 network->flags = 0;
5814 network->last_associate = 0;
5815 network->time_stamp[0] = 0;
5816 network->time_stamp[1] = 0;
5817 network->beacon_interval = 100; /* Default */
5818 network->listen_interval = 10; /* Default */
5819 network->atim_window = 0; /* Default */
5820 network->wpa_ie_len = 0;
5821 network->rsn_ie_len = 0;
5822 }
5823
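/*
 * Send the TKIP/CCMP transmit key at @index to the firmware.  Nothing
 * is sent if that key slot has not been configured (sec.flags bit clear).
 */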
5824 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5825 {
5826 struct ipw_tgi_tx_key key;
5827
5828 if (!(priv->ieee->sec.flags & (1 << index)))
5829 return;
5830
5831 key.key_id = index;
5832 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5833 key.security_type = type;
5834 key.station_index = 0; /* always 0 for BSS */
5835 key.flags = 0;
5836 /* 0 for new key; previous value of counter (after fatal error) */
5837 key.tx_counter[0] = 0;
5838 key.tx_counter[1] = 0;
5839
5840 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5841 }
5842
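/*
 * Download the configured key slots (0-3) to the firmware as DINO WEP
 * key commands of the given security @type; unconfigured slots are
 * skipped.
 */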
5843 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5844 {
5845 struct ipw_wep_key key;
5846 int i;
5847
5848 key.cmd_id = DINO_CMD_WEP_KEY;
5849 key.seq_num = 0;
5850
5851 /* Note: AES keys cannot be set multiple times;
5852 * only set them the first time. */
5853 for (i = 0; i < 4; i++) {
5854 key.key_index = i | type;
5855 if (!(priv->ieee->sec.flags & (1 << i))) {
5856 key.key_size = 0;
5857 continue;
5858 }
5859
5860 key.key_size = priv->ieee->sec.key_sizes[i];
5861 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5862
5863 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5864 }
5865 }
5866
5867 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5868 {
5869 if (priv->ieee->host_encrypt)
5870 return;
5871
5872 switch (level) {
5873 case SEC_LEVEL_3:
5874 priv->sys_config.disable_unicast_decryption = 0;
5875 priv->ieee->host_decrypt = 0;
5876 break;
5877 case SEC_LEVEL_2:
5878 priv->sys_config.disable_unicast_decryption = 1;
5879 priv->ieee->host_decrypt = 1;
5880 break;
5881 case SEC_LEVEL_1:
5882 priv->sys_config.disable_unicast_decryption = 0;
5883 priv->ieee->host_decrypt = 0;
5884 break;
5885 case SEC_LEVEL_0:
5886 priv->sys_config.disable_unicast_decryption = 1;
5887 break;
5888 default:
5889 break;
5890 }
5891 }
5892
5893 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5894 {
5895 if (priv->ieee->host_encrypt)
5896 return;
5897
5898 switch (level) {
5899 case SEC_LEVEL_3:
5900 priv->sys_config.disable_multicast_decryption = 0;
5901 break;
5902 case SEC_LEVEL_2:
5903 priv->sys_config.disable_multicast_decryption = 1;
5904 break;
5905 case SEC_LEVEL_1:
5906 priv->sys_config.disable_multicast_decryption = 0;
5907 break;
5908 case SEC_LEVEL_0:
5909 priv->sys_config.disable_multicast_decryption = 1;
5910 break;
5911 default:
5912 break;
5913 }
5914 }
5915
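/*
 * Program the firmware with whatever keys the current security level
 * requires: the active CCMP TX key (plus the group keys when multicast
 * decryption is done in hardware) for level 3, the active TKIP TX key
 * for level 2, and the WEP keys for level 1.
 */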
5916 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5917 {
5918 switch (priv->ieee->sec.level) {
5919 case SEC_LEVEL_3:
5920 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5921 ipw_send_tgi_tx_key(priv,
5922 DCT_FLAG_EXT_SECURITY_CCM,
5923 priv->ieee->sec.active_key);
5924
5925 if (!priv->ieee->host_mc_decrypt)
5926 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5927 break;
5928 case SEC_LEVEL_2:
5929 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5930 ipw_send_tgi_tx_key(priv,
5931 DCT_FLAG_EXT_SECURITY_TKIP,
5932 priv->ieee->sec.active_key);
5933 break;
5934 case SEC_LEVEL_1:
5935 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5936 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5937 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5938 break;
5939 case SEC_LEVEL_0:
5940 default:
5941 break;
5942 }
5943 }
5944
5945 static void ipw_adhoc_check(void *data)
5946 {
5947 struct ipw_priv *priv = data;
5948
5949 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5950 !(priv->config & CFG_ADHOC_PERSIST)) {
5951 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5952 IPW_DL_STATE | IPW_DL_ASSOC,
5953 "Missed beacon: %d - disassociate\n",
5954 priv->missed_adhoc_beacons);
5955 ipw_remove_current_network(priv);
5956 ipw_disassociate(priv);
5957 return;
5958 }
5959
5960 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5961 priv->assoc_request.beacon_interval);
5962 }
5963
5964 static void ipw_bg_adhoc_check(void *data)
5965 {
5966 struct ipw_priv *priv = data;
5967 mutex_lock(&priv->mutex);
5968 ipw_adhoc_check(data);
5969 mutex_unlock(&priv->mutex);
5970 }
5971
5972 #ifdef CONFIG_IPW2200_DEBUG
5973 static void ipw_debug_config(struct ipw_priv *priv)
5974 {
5975 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5976 "[CFG 0x%08X]\n", priv->config);
5977 if (priv->config & CFG_STATIC_CHANNEL)
5978 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5979 else
5980 IPW_DEBUG_INFO("Channel unlocked.\n");
5981 if (priv->config & CFG_STATIC_ESSID)
5982 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5983 escape_essid(priv->essid, priv->essid_len));
5984 else
5985 IPW_DEBUG_INFO("ESSID unlocked.\n");
5986 if (priv->config & CFG_STATIC_BSSID)
5987 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5988 MAC_ARG(priv->bssid));
5989 else
5990 IPW_DEBUG_INFO("BSSID unlocked.\n");
5991 if (priv->capability & CAP_PRIVACY_ON)
5992 IPW_DEBUG_INFO("PRIVACY on\n");
5993 else
5994 IPW_DEBUG_INFO("PRIVACY off\n");
5995 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5996 }
5997 #else
5998 #define ipw_debug_config(x) do {} while (0)
5999 #endif
6000
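/*
 * Write the user-configured fixed TX rate mask into the firmware's
 * fixed-rate override area, remapped for the band/mode currently in
 * use.  A mask that is invalid for the current mode disables the
 * override (all-zero rate mask).
 */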
6001 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6002 {
6003 /* TODO: Verify that this works... */
6004 struct ipw_fixed_rate fr = {
6005 .tx_rates = priv->rates_mask
6006 };
6007 u32 reg;
6008 u16 mask = 0;
6009
6010 /* Identify 'current FW band' and match it with the fixed
6011 * Tx rates */
6012
6013 switch (priv->ieee->freq_band) {
6014 case IEEE80211_52GHZ_BAND: /* A only */
6015 /* IEEE_A */
6016 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6017 /* Invalid fixed rate mask */
6018 IPW_DEBUG_WX
6019 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6020 fr.tx_rates = 0;
6021 break;
6022 }
6023
6024 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6025 break;
6026
6027 default: /* 2.4Ghz or Mixed */
6028 /* IEEE_B */
6029 if (mode == IEEE_B) {
6030 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6031 /* Invalid fixed rate mask */
6032 IPW_DEBUG_WX
6033 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6034 fr.tx_rates = 0;
6035 }
6036 break;
6037 }
6038
6039 /* IEEE_G */
6040 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6041 IEEE80211_OFDM_RATES_MASK)) {
6042 /* Invalid fixed rate mask */
6043 IPW_DEBUG_WX
6044 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6045 fr.tx_rates = 0;
6046 break;
6047 }
6048
6049 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6050 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6051 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6052 }
6053
6054 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6055 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6056 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6057 }
6058
6059 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6060 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6061 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6062 }
6063
6064 fr.tx_rates |= mask;
6065 break;
6066 }
6067
6068 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6069 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6070 }
6071
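/*
 * Ask the firmware to abort the scan currently in progress; further
 * abort requests are ignored until this one completes.
 */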
6072 static void ipw_abort_scan(struct ipw_priv *priv)
6073 {
6074 int err;
6075
6076 if (priv->status & STATUS_SCAN_ABORTING) {
6077 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6078 return;
6079 }
6080 priv->status |= STATUS_SCAN_ABORTING;
6081
6082 err = ipw_send_scan_abort(priv);
6083 if (err)
6084 IPW_DEBUG_HC("Request to abort scan failed.\n");
6085 }
6086
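/*
 * Fill in the channel list of a scan request.  Each band's block starts
 * with a header byte encoding the band and the number of channels that
 * follow it; e.g. (illustrative channel numbers) a block of three
 * 802.11a channels would look like:
 *
 *	channels_list[n]     = (IPW_A_MODE << 6) | 3;
 *	channels_list[n + 1] = 36;
 *	channels_list[n + 2] = 40;
 *	channels_list[n + 3] = 44;
 *
 * The channel we are currently associated on is skipped, and channels
 * flagged passive-only are forced to a passive full-dwell scan.
 */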
6087 static void ipw_add_scan_channels(struct ipw_priv *priv,
6088 struct ipw_scan_request_ext *scan,
6089 int scan_type)
6090 {
6091 int channel_index = 0;
6092 const struct ieee80211_geo *geo;
6093 int i;
6094
6095 geo = ieee80211_get_geo(priv->ieee);
6096
6097 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6098 int start = channel_index;
6099 for (i = 0; i < geo->a_channels; i++) {
6100 if ((priv->status & STATUS_ASSOCIATED) &&
6101 geo->a[i].channel == priv->channel)
6102 continue;
6103 channel_index++;
6104 scan->channels_list[channel_index] = geo->a[i].channel;
6105 ipw_set_scan_type(scan, channel_index,
6106 geo->a[i].
6107 flags & IEEE80211_CH_PASSIVE_ONLY ?
6108 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6109 scan_type);
6110 }
6111
6112 if (start != channel_index) {
6113 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6114 (channel_index - start);
6115 channel_index++;
6116 }
6117 }
6118
6119 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6120 int start = channel_index;
6121 if (priv->config & CFG_SPEED_SCAN) {
6122 int index;
6123 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6124 /* nop out the list */
6125 [0] = 0
6126 };
6127
6128 u8 channel;
6129 while (channel_index < IPW_SCAN_CHANNELS) {
6130 channel =
6131 priv->speed_scan[priv->speed_scan_pos];
6132 if (channel == 0) {
6133 priv->speed_scan_pos = 0;
6134 channel = priv->speed_scan[0];
6135 }
6136 if ((priv->status & STATUS_ASSOCIATED) &&
6137 channel == priv->channel) {
6138 priv->speed_scan_pos++;
6139 continue;
6140 }
6141
6142 /* If this channel has already been
6143 * added in scan, break from loop
6144 * and this will be the first channel
6145 * in the next scan.
6146 */
6147 if (channels[channel - 1] != 0)
6148 break;
6149
6150 channels[channel - 1] = 1;
6151 priv->speed_scan_pos++;
6152 channel_index++;
6153 scan->channels_list[channel_index] = channel;
6154 index =
6155 ieee80211_channel_to_index(priv->ieee, channel);
6156 ipw_set_scan_type(scan, channel_index,
6157 geo->bg[index].
6158 flags &
6159 IEEE80211_CH_PASSIVE_ONLY ?
6160 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6161 : scan_type);
6162 }
6163 } else {
6164 for (i = 0; i < geo->bg_channels; i++) {
6165 if ((priv->status & STATUS_ASSOCIATED) &&
6166 geo->bg[i].channel == priv->channel)
6167 continue;
6168 channel_index++;
6169 scan->channels_list[channel_index] =
6170 geo->bg[i].channel;
6171 ipw_set_scan_type(scan, channel_index,
6172 geo->bg[i].
6173 flags &
6174 IEEE80211_CH_PASSIVE_ONLY ?
6175 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6176 : scan_type);
6177 }
6178 }
6179
6180 if (start != channel_index) {
6181 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6182 (channel_index - start);
6183 }
6184 }
6185 }
6186
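/*
 * Build and send a scan request to the firmware.  If a scan is already
 * running, an abort is pending, or the radio is disabled by rfkill, the
 * request is only flagged as pending (STATUS_SCAN_PENDING) and retried
 * later.
 */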
6187 static int ipw_request_scan(struct ipw_priv *priv)
6188 {
6189 struct ipw_scan_request_ext scan;
6190 int err = 0, scan_type;
6191
6192 if (!(priv->status & STATUS_INIT) ||
6193 (priv->status & STATUS_EXIT_PENDING))
6194 return 0;
6195
6196 mutex_lock(&priv->mutex);
6197
6198 if (priv->status & STATUS_SCANNING) {
6199 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6200 priv->status |= STATUS_SCAN_PENDING;
6201 goto done;
6202 }
6203
6204 if (!(priv->status & STATUS_SCAN_FORCED) &&
6205 priv->status & STATUS_SCAN_ABORTING) {
6206 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6207 priv->status |= STATUS_SCAN_PENDING;
6208 goto done;
6209 }
6210
6211 if (priv->status & STATUS_RF_KILL_MASK) {
6212 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6213 priv->status |= STATUS_SCAN_PENDING;
6214 goto done;
6215 }
6216
6217 memset(&scan, 0, sizeof(scan));
6218
6219 if (priv->config & CFG_SPEED_SCAN)
6220 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6221 cpu_to_le16(30);
6222 else
6223 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6224 cpu_to_le16(20);
6225
6226 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6227 cpu_to_le16(20);
6228 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6229
6230 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6231
6232 #ifdef CONFIG_IPW2200_MONITOR
6233 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6234 u8 channel;
6235 u8 band = 0;
6236
6237 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6238 case IEEE80211_52GHZ_BAND:
6239 band = (u8) (IPW_A_MODE << 6) | 1;
6240 channel = priv->channel;
6241 break;
6242
6243 case IEEE80211_24GHZ_BAND:
6244 band = (u8) (IPW_B_MODE << 6) | 1;
6245 channel = priv->channel;
6246 break;
6247
6248 default:
6249 band = (u8) (IPW_B_MODE << 6) | 1;
6250 channel = 9;
6251 break;
6252 }
6253
6254 scan.channels_list[0] = band;
6255 scan.channels_list[1] = channel;
6256 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6257
6258 /* NOTE: The card will sit on this channel for this time
6259 * period. Scan aborts are timing sensitive and frequently
6260 * result in firmware restarts. As such, it is best to
6261 * set a small dwell_time here and just keep re-issuing
6262 * scans. Otherwise fast channel hopping will not actually
6263 * hop channels.
6264 *
6265 * TODO: Move SPEED SCAN support to all modes and bands */
6266 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6267 cpu_to_le16(2000);
6268 } else {
6269 #endif /* CONFIG_IPW2200_MONITOR */
6270 /* If we are roaming, then make this a directed scan for the
6271 * current network. Otherwise, ensure that every other scan
6272 * is a fast channel hop scan */
6273 if ((priv->status & STATUS_ROAMING)
6274 || (!(priv->status & STATUS_ASSOCIATED)
6275 && (priv->config & CFG_STATIC_ESSID)
6276 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6277 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6278 if (err) {
6279 IPW_DEBUG_HC("Attempt to send SSID command "
6280 "failed.\n");
6281 goto done;
6282 }
6283
6284 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6285 } else
6286 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6287
6288 ipw_add_scan_channels(priv, &scan, scan_type);
6289 #ifdef CONFIG_IPW2200_MONITOR
6290 }
6291 #endif
6292
6293 err = ipw_send_scan_request_ext(priv, &scan);
6294 if (err) {
6295 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6296 goto done;
6297 }
6298
6299 priv->status |= STATUS_SCANNING;
6300 priv->status &= ~STATUS_SCAN_PENDING;
6301 queue_delayed_work(priv->workqueue, &priv->scan_check,
6302 IPW_SCAN_CHECK_WATCHDOG);
6303 done:
6304 mutex_unlock(&priv->mutex);
6305 return err;
6306 }
6307
6308 static void ipw_bg_abort_scan(void *data)
6309 {
6310 struct ipw_priv *priv = data;
6311 mutex_lock(&priv->mutex);
6312 ipw_abort_scan(data);
6313 mutex_unlock(&priv->mutex);
6314 }
6315
6316 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6317 {
6318 /* This is called when wpa_supplicant loads and closes the driver
6319 * interface. */
6320 priv->ieee->wpa_enabled = value;
6321 return 0;
6322 }
6323
6324 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6325 {
6326 struct ieee80211_device *ieee = priv->ieee;
6327 struct ieee80211_security sec = {
6328 .flags = SEC_AUTH_MODE,
6329 };
6330 int ret = 0;
6331
6332 if (value & IW_AUTH_ALG_SHARED_KEY) {
6333 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6334 ieee->open_wep = 0;
6335 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6336 sec.auth_mode = WLAN_AUTH_OPEN;
6337 ieee->open_wep = 1;
6338 } else if (value & IW_AUTH_ALG_LEAP) {
6339 sec.auth_mode = WLAN_AUTH_LEAP;
6340 ieee->open_wep = 1;
6341 } else
6342 return -EINVAL;
6343
6344 if (ieee->set_security)
6345 ieee->set_security(ieee->dev, &sec);
6346 else
6347 ret = -EOPNOTSUPP;
6348
6349 return ret;
6350 }
6351
6352 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6353 int wpa_ie_len)
6354 {
6355 /* make sure WPA is enabled */
6356 ipw_wpa_enable(priv, 1);
6357 }
6358
6359 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6360 char *capabilities, int length)
6361 {
6362 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6363
6364 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6365 capabilities);
6366 }
6367
6368 /*
6369 * WE-18 support
6370 */
6371
6372 /* SIOCSIWGENIE */
6373 static int ipw_wx_set_genie(struct net_device *dev,
6374 struct iw_request_info *info,
6375 union iwreq_data *wrqu, char *extra)
6376 {
6377 struct ipw_priv *priv = ieee80211_priv(dev);
6378 struct ieee80211_device *ieee = priv->ieee;
6379 u8 *buf;
6380 int err = 0;
6381
6382 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6383 (wrqu->data.length && extra == NULL))
6384 return -EINVAL;
6385
6386 if (wrqu->data.length) {
6387 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6388 if (buf == NULL) {
6389 err = -ENOMEM;
6390 goto out;
6391 }
6392
6393 memcpy(buf, extra, wrqu->data.length);
6394 kfree(ieee->wpa_ie);
6395 ieee->wpa_ie = buf;
6396 ieee->wpa_ie_len = wrqu->data.length;
6397 } else {
6398 kfree(ieee->wpa_ie);
6399 ieee->wpa_ie = NULL;
6400 ieee->wpa_ie_len = 0;
6401 }
6402
6403 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6404 out:
6405 return err;
6406 }
6407
6408 /* SIOCGIWGENIE */
6409 static int ipw_wx_get_genie(struct net_device *dev,
6410 struct iw_request_info *info,
6411 union iwreq_data *wrqu, char *extra)
6412 {
6413 struct ipw_priv *priv = ieee80211_priv(dev);
6414 struct ieee80211_device *ieee = priv->ieee;
6415 int err = 0;
6416
6417 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6418 wrqu->data.length = 0;
6419 goto out;
6420 }
6421
6422 if (wrqu->data.length < ieee->wpa_ie_len) {
6423 err = -E2BIG;
6424 goto out;
6425 }
6426
6427 wrqu->data.length = ieee->wpa_ie_len;
6428 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6429
6430 out:
6431 return err;
6432 }
6433
6434 static int wext_cipher2level(int cipher)
6435 {
6436 switch (cipher) {
6437 case IW_AUTH_CIPHER_NONE:
6438 return SEC_LEVEL_0;
6439 case IW_AUTH_CIPHER_WEP40:
6440 case IW_AUTH_CIPHER_WEP104:
6441 return SEC_LEVEL_1;
6442 case IW_AUTH_CIPHER_TKIP:
6443 return SEC_LEVEL_2;
6444 case IW_AUTH_CIPHER_CCMP:
6445 return SEC_LEVEL_3;
6446 default:
6447 return -1;
6448 }
6449 }
6450
6451 /* SIOCSIWAUTH */
6452 static int ipw_wx_set_auth(struct net_device *dev,
6453 struct iw_request_info *info,
6454 union iwreq_data *wrqu, char *extra)
6455 {
6456 struct ipw_priv *priv = ieee80211_priv(dev);
6457 struct ieee80211_device *ieee = priv->ieee;
6458 struct iw_param *param = &wrqu->param;
6459 struct ieee80211_crypt_data *crypt;
6460 unsigned long flags;
6461 int ret = 0;
6462
6463 switch (param->flags & IW_AUTH_INDEX) {
6464 case IW_AUTH_WPA_VERSION:
6465 break;
6466 case IW_AUTH_CIPHER_PAIRWISE:
6467 ipw_set_hw_decrypt_unicast(priv,
6468 wext_cipher2level(param->value));
6469 break;
6470 case IW_AUTH_CIPHER_GROUP:
6471 ipw_set_hw_decrypt_multicast(priv,
6472 wext_cipher2level(param->value));
6473 break;
6474 case IW_AUTH_KEY_MGMT:
6475 /*
6476 * ipw2200 does not use these parameters
6477 */
6478 break;
6479
6480 case IW_AUTH_TKIP_COUNTERMEASURES:
6481 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6482 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6483 break;
6484
6485 flags = crypt->ops->get_flags(crypt->priv);
6486
6487 if (param->value)
6488 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6489 else
6490 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6491
6492 crypt->ops->set_flags(flags, crypt->priv);
6493
6494 break;
6495
6496 case IW_AUTH_DROP_UNENCRYPTED:{
6497 /* HACK:
6498 *
6499 * wpa_supplicant calls set_wpa_enabled when the driver
6500 * is loaded and unloaded, regardless of whether WPA is being
6501 * used. No other calls are made which can be used to
6502 * determine whether encryption will be used prior to the
6503 * expected association. If encryption is not being
6504 * used, drop_unencrypted is set to false, else true -- we
6505 * can use this to determine if the CAP_PRIVACY_ON bit should
6506 * be set.
6507 */
6508 struct ieee80211_security sec = {
6509 .flags = SEC_ENABLED,
6510 .enabled = param->value,
6511 };
6512 priv->ieee->drop_unencrypted = param->value;
6513 /* We only change SEC_LEVEL for open mode. Others
6514 * are set by ipw_wpa_set_encryption.
6515 */
6516 if (!param->value) {
6517 sec.flags |= SEC_LEVEL;
6518 sec.level = SEC_LEVEL_0;
6519 } else {
6520 sec.flags |= SEC_LEVEL;
6521 sec.level = SEC_LEVEL_1;
6522 }
6523 if (priv->ieee->set_security)
6524 priv->ieee->set_security(priv->ieee->dev, &sec);
6525 break;
6526 }
6527
6528 case IW_AUTH_80211_AUTH_ALG:
6529 ret = ipw_wpa_set_auth_algs(priv, param->value);
6530 break;
6531
6532 case IW_AUTH_WPA_ENABLED:
6533 ret = ipw_wpa_enable(priv, param->value);
6534 ipw_disassociate(priv);
6535 break;
6536
6537 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6538 ieee->ieee802_1x = param->value;
6539 break;
6540
6541 case IW_AUTH_PRIVACY_INVOKED:
6542 ieee->privacy_invoked = param->value;
6543 break;
6544
6545 default:
6546 return -EOPNOTSUPP;
6547 }
6548 return ret;
6549 }
6550
6551 /* SIOCGIWAUTH */
6552 static int ipw_wx_get_auth(struct net_device *dev,
6553 struct iw_request_info *info,
6554 union iwreq_data *wrqu, char *extra)
6555 {
6556 struct ipw_priv *priv = ieee80211_priv(dev);
6557 struct ieee80211_device *ieee = priv->ieee;
6558 struct ieee80211_crypt_data *crypt;
6559 struct iw_param *param = &wrqu->param;
6560 int ret = 0;
6561
6562 switch (param->flags & IW_AUTH_INDEX) {
6563 case IW_AUTH_WPA_VERSION:
6564 case IW_AUTH_CIPHER_PAIRWISE:
6565 case IW_AUTH_CIPHER_GROUP:
6566 case IW_AUTH_KEY_MGMT:
6567 /*
6568 * wpa_supplicant will control these internally
6569 */
6570 ret = -EOPNOTSUPP;
6571 break;
6572
6573 case IW_AUTH_TKIP_COUNTERMEASURES:
6574 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6575 if (!crypt || !crypt->ops->get_flags)
6576 break;
6577
6578 param->value = (crypt->ops->get_flags(crypt->priv) &
6579 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6580
6581 break;
6582
6583 case IW_AUTH_DROP_UNENCRYPTED:
6584 param->value = ieee->drop_unencrypted;
6585 break;
6586
6587 case IW_AUTH_80211_AUTH_ALG:
6588 param->value = ieee->sec.auth_mode;
6589 break;
6590
6591 case IW_AUTH_WPA_ENABLED:
6592 param->value = ieee->wpa_enabled;
6593 break;
6594
6595 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6596 param->value = ieee->ieee802_1x;
6597 break;
6598
6599 case IW_AUTH_ROAMING_CONTROL:
6600 case IW_AUTH_PRIVACY_INVOKED:
6601 param->value = ieee->privacy_invoked;
6602 break;
6603
6604 default:
6605 return -EOPNOTSUPP;
6606 }
6607 return 0;
6608 }
6609
6610 /* SIOCSIWENCODEEXT */
6611 static int ipw_wx_set_encodeext(struct net_device *dev,
6612 struct iw_request_info *info,
6613 union iwreq_data *wrqu, char *extra)
6614 {
6615 struct ipw_priv *priv = ieee80211_priv(dev);
6616 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6617
6618 if (hwcrypto) {
6619 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6620 /* IPW HW can't build TKIP MIC,
6621 host decryption still needed */
6622 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6623 priv->ieee->host_mc_decrypt = 1;
6624 else {
6625 priv->ieee->host_encrypt = 0;
6626 priv->ieee->host_encrypt_msdu = 1;
6627 priv->ieee->host_decrypt = 1;
6628 }
6629 } else {
6630 priv->ieee->host_encrypt = 0;
6631 priv->ieee->host_encrypt_msdu = 0;
6632 priv->ieee->host_decrypt = 0;
6633 priv->ieee->host_mc_decrypt = 0;
6634 }
6635 }
6636
6637 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6638 }
6639
6640 /* SIOCGIWENCODEEXT */
6641 static int ipw_wx_get_encodeext(struct net_device *dev,
6642 struct iw_request_info *info,
6643 union iwreq_data *wrqu, char *extra)
6644 {
6645 struct ipw_priv *priv = ieee80211_priv(dev);
6646 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6647 }
6648
6649 /* SIOCSIWMLME */
6650 static int ipw_wx_set_mlme(struct net_device *dev,
6651 struct iw_request_info *info,
6652 union iwreq_data *wrqu, char *extra)
6653 {
6654 struct ipw_priv *priv = ieee80211_priv(dev);
6655 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6656 u16 reason;
6657
6658 reason = cpu_to_le16(mlme->reason_code);
6659
6660 switch (mlme->cmd) {
6661 case IW_MLME_DEAUTH:
6662 /* silently ignore */
6663 break;
6664
6665 case IW_MLME_DISASSOC:
6666 ipw_disassociate(priv);
6667 break;
6668
6669 default:
6670 return -EOPNOTSUPP;
6671 }
6672 return 0;
6673 }
6674
6675 #ifdef CONFIG_IPW2200_QOS
6676
6677 /* QoS */
6678 /*
6679 * Get the modulation type of the current network, or
6680 * the card's current mode if not associated
6681 */
6682 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6683 {
6684 u8 mode = 0;
6685
6686 if (priv->status & STATUS_ASSOCIATED) {
6687 unsigned long flags;
6688
6689 spin_lock_irqsave(&priv->ieee->lock, flags);
6690 mode = priv->assoc_network->mode;
6691 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6692 } else {
6693 mode = priv->ieee->mode;
6694 }
6695 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6696 return mode;
6697 }
6698
6699 /*
6700 * Handle management frame beacon and probe response
6701 */
6702 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6703 int active_network,
6704 struct ieee80211_network *network)
6705 {
6706 u32 size = sizeof(struct ieee80211_qos_parameters);
6707
6708 if (network->capability & WLAN_CAPABILITY_IBSS)
6709 network->qos_data.active = network->qos_data.supported;
6710
6711 if (network->flags & NETWORK_HAS_QOS_MASK) {
6712 if (active_network &&
6713 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6714 network->qos_data.active = network->qos_data.supported;
6715
6716 if ((network->qos_data.active == 1) && (active_network == 1) &&
6717 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6718 (network->qos_data.old_param_count !=
6719 network->qos_data.param_count)) {
6720 network->qos_data.old_param_count =
6721 network->qos_data.param_count;
6722 schedule_work(&priv->qos_activate);
6723 IPW_DEBUG_QOS("QoS parameters change call "
6724 "qos_activate\n");
6725 }
6726 } else {
6727 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6728 memcpy(&network->qos_data.parameters,
6729 &def_parameters_CCK, size);
6730 else
6731 memcpy(&network->qos_data.parameters,
6732 &def_parameters_OFDM, size);
6733
6734 if ((network->qos_data.active == 1) && (active_network == 1)) {
6735 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6736 schedule_work(&priv->qos_activate);
6737 }
6738
6739 network->qos_data.active = 0;
6740 network->qos_data.supported = 0;
6741 }
6742 if ((priv->status & STATUS_ASSOCIATED) &&
6743 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6744 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6745 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6746 !(network->flags & NETWORK_EMPTY_ESSID))
6747 if ((network->ssid_len ==
6748 priv->assoc_network->ssid_len) &&
6749 !memcmp(network->ssid,
6750 priv->assoc_network->ssid,
6751 network->ssid_len)) {
6752 queue_work(priv->workqueue,
6753 &priv->merge_networks);
6754 }
6755 }
6756
6757 return 0;
6758 }
6759
6760 /*
6761 * This function sets up the firmware to support QoS. It sends
6762 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6763 */
6764 static int ipw_qos_activate(struct ipw_priv *priv,
6765 struct ieee80211_qos_data *qos_network_data)
6766 {
6767 int err;
6768 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6769 struct ieee80211_qos_parameters *active_one = NULL;
6770 u32 size = sizeof(struct ieee80211_qos_parameters);
6771 u32 burst_duration;
6772 int i;
6773 u8 type;
6774
6775 type = ipw_qos_current_mode(priv);
6776
6777 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6778 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6779 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6780 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6781
6782 if (qos_network_data == NULL) {
6783 if (type == IEEE_B) {
6784 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6785 active_one = &def_parameters_CCK;
6786 } else
6787 active_one = &def_parameters_OFDM;
6788
6789 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6790 burst_duration = ipw_qos_get_burst_duration(priv);
6791 for (i = 0; i < QOS_QUEUE_NUM; i++)
6792 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6793 (u16) burst_duration;
6794 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6795 if (type == IEEE_B) {
6796 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
6797 type);
6798 if (priv->qos_data.qos_enable == 0)
6799 active_one = &def_parameters_CCK;
6800 else
6801 active_one = priv->qos_data.def_qos_parm_CCK;
6802 } else {
6803 if (priv->qos_data.qos_enable == 0)
6804 active_one = &def_parameters_OFDM;
6805 else
6806 active_one = priv->qos_data.def_qos_parm_OFDM;
6807 }
6808 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6809 } else {
6810 unsigned long flags;
6811 int active;
6812
6813 spin_lock_irqsave(&priv->ieee->lock, flags);
6814 active_one = &(qos_network_data->parameters);
6815 qos_network_data->old_param_count =
6816 qos_network_data->param_count;
6817 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6818 active = qos_network_data->supported;
6819 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6820
6821 if (active == 0) {
6822 burst_duration = ipw_qos_get_burst_duration(priv);
6823 for (i = 0; i < QOS_QUEUE_NUM; i++)
6824 qos_parameters[QOS_PARAM_SET_ACTIVE].
6825 tx_op_limit[i] = (u16) burst_duration;
6826 }
6827 }
6828
6829 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6830 err = ipw_send_qos_params_command(priv,
6831 (struct ieee80211_qos_parameters *)
6832 &(qos_parameters[0]));
6833 if (err)
6834 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6835
6836 return err;
6837 }
6838
6839 /*
6840 * send IPW_CMD_WME_INFO to the firmware
6841 */
6842 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6843 {
6844 int ret = 0;
6845 struct ieee80211_qos_information_element qos_info;
6846
6847 if (priv == NULL)
6848 return -1;
6849
6850 qos_info.elementID = QOS_ELEMENT_ID;
6851 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6852
6853 qos_info.version = QOS_VERSION_1;
6854 qos_info.ac_info = 0;
6855
6856 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6857 qos_info.qui_type = QOS_OUI_TYPE;
6858 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6859
6860 ret = ipw_send_qos_info_command(priv, &qos_info);
6861 if (ret != 0) {
6862 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6863 }
6864 return ret;
6865 }
6866
6867 /*
6868 * Set the QoS parameter with the association request structure
6869 */
6870 static int ipw_qos_association(struct ipw_priv *priv,
6871 struct ieee80211_network *network)
6872 {
6873 int err = 0;
6874 struct ieee80211_qos_data *qos_data = NULL;
6875 struct ieee80211_qos_data ibss_data = {
6876 .supported = 1,
6877 .active = 1,
6878 };
6879
6880 switch (priv->ieee->iw_mode) {
6881 case IW_MODE_ADHOC:
6882 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6883
6884 qos_data = &ibss_data;
6885 break;
6886
6887 case IW_MODE_INFRA:
6888 qos_data = &network->qos_data;
6889 break;
6890
6891 default:
6892 BUG();
6893 break;
6894 }
6895
6896 err = ipw_qos_activate(priv, qos_data);
6897 if (err) {
6898 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6899 return err;
6900 }
6901
6902 if (priv->qos_data.qos_enable && qos_data->supported) {
6903 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6904 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6905 return ipw_qos_set_info_element(priv);
6906 }
6907
6908 return 0;
6909 }
6910
6911 /*
6912 * Handle the association response: if the QoS settings of the
6913 * network differ from the settings we associated with, adjust
6914 * our QoS settings accordingly
6915 */
6916 static int ipw_qos_association_resp(struct ipw_priv *priv,
6917 struct ieee80211_network *network)
6918 {
6919 int ret = 0;
6920 unsigned long flags;
6921 u32 size = sizeof(struct ieee80211_qos_parameters);
6922 int set_qos_param = 0;
6923
6924 if ((priv == NULL) || (network == NULL) ||
6925 (priv->assoc_network == NULL))
6926 return ret;
6927
6928 if (!(priv->status & STATUS_ASSOCIATED))
6929 return ret;
6930
6931 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6932 return ret;
6933
6934 spin_lock_irqsave(&priv->ieee->lock, flags);
6935 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6936 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6937 sizeof(struct ieee80211_qos_data));
6938 priv->assoc_network->qos_data.active = 1;
6939 if ((network->qos_data.old_param_count !=
6940 network->qos_data.param_count)) {
6941 set_qos_param = 1;
6942 network->qos_data.old_param_count =
6943 network->qos_data.param_count;
6944 }
6945
6946 } else {
6947 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6948 memcpy(&priv->assoc_network->qos_data.parameters,
6949 &def_parameters_CCK, size);
6950 else
6951 memcpy(&priv->assoc_network->qos_data.parameters,
6952 &def_parameters_OFDM, size);
6953 priv->assoc_network->qos_data.active = 0;
6954 priv->assoc_network->qos_data.supported = 0;
6955 set_qos_param = 1;
6956 }
6957
6958 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6959
6960 if (set_qos_param == 1)
6961 schedule_work(&priv->qos_activate);
6962
6963 return ret;
6964 }
6965
6966 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6967 {
6968 u32 ret = 0;
6969
6970 if ((priv == NULL))
6971 return 0;
6972
6973 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6974 ret = priv->qos_data.burst_duration_CCK;
6975 else
6976 ret = priv->qos_data.burst_duration_OFDM;
6977
6978 return ret;
6979 }
6980
6981 /*
6982 * Initialize the setting of QoS global
6983 */
6984 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6985 int burst_enable, u32 burst_duration_CCK,
6986 u32 burst_duration_OFDM)
6987 {
6988 priv->qos_data.qos_enable = enable;
6989
6990 if (priv->qos_data.qos_enable) {
6991 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6992 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6993 IPW_DEBUG_QOS("QoS is enabled\n");
6994 } else {
6995 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6996 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6997 IPW_DEBUG_QOS("QoS is not enabled\n");
6998 }
6999
7000 priv->qos_data.burst_enable = burst_enable;
7001
7002 if (burst_enable) {
7003 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7004 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7005 } else {
7006 priv->qos_data.burst_duration_CCK = 0;
7007 priv->qos_data.burst_duration_OFDM = 0;
7008 }
7009 }
7010
7011 /*
7012 * map the packet priority to the right TX Queue
7013 */
7014 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7015 {
7016 if (priority > 7 || !priv->qos_data.qos_enable)
7017 priority = 0;
7018
7019 return from_priority_to_tx_queue[priority] - 1;
7020 }
7021
7022 static int ipw_is_qos_active(struct net_device *dev,
7023 struct sk_buff *skb)
7024 {
7025 struct ipw_priv *priv = ieee80211_priv(dev);
7026 struct ieee80211_qos_data *qos_data = NULL;
7027 int active, supported;
7028 u8 *daddr = skb->data + ETH_ALEN;
7029 int unicast = !is_multicast_ether_addr(daddr);
7030
7031 if (!(priv->status & STATUS_ASSOCIATED))
7032 return 0;
7033
7034 qos_data = &priv->assoc_network->qos_data;
7035
7036 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7037 if (unicast == 0)
7038 qos_data->active = 0;
7039 else
7040 qos_data->active = qos_data->supported;
7041 }
7042 active = qos_data->active;
7043 supported = qos_data->supported;
7044 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7045 "unicast %d\n",
7046 priv->qos_data.qos_enable, active, supported, unicast);
7047 if (active && priv->qos_data.qos_enable)
7048 return 1;
7049
7050 return 0;
7051
7052 }
7053 /*
7054 * add QoS parameter to the TX command
7055 */
7056 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7057 u16 priority,
7058 struct tfd_data *tfd)
7059 {
7060 int tx_queue_id = 0;
7061
7062
7063 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7064 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7065
7066 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7067 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7068 tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
7069 }
7070 return 0;
7071 }
7072
7073 /*
7074 * background support to run QoS activate functionality
7075 */
7076 static void ipw_bg_qos_activate(void *data)
7077 {
7078 struct ipw_priv *priv = data;
7079
7080 if (priv == NULL)
7081 return;
7082
7083 mutex_lock(&priv->mutex);
7084
7085 if (priv->status & STATUS_ASSOCIATED)
7086 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7087
7088 mutex_unlock(&priv->mutex);
7089 }
7090
7091 static int ipw_handle_probe_response(struct net_device *dev,
7092 struct ieee80211_probe_response *resp,
7093 struct ieee80211_network *network)
7094 {
7095 struct ipw_priv *priv = ieee80211_priv(dev);
7096 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7097 (network == priv->assoc_network));
7098
7099 ipw_qos_handle_probe_response(priv, active_network, network);
7100
7101 return 0;
7102 }
7103
7104 static int ipw_handle_beacon(struct net_device *dev,
7105 struct ieee80211_beacon *resp,
7106 struct ieee80211_network *network)
7107 {
7108 struct ipw_priv *priv = ieee80211_priv(dev);
7109 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7110 (network == priv->assoc_network));
7111
7112 ipw_qos_handle_probe_response(priv, active_network, network);
7113
7114 return 0;
7115 }
7116
7117 static int ipw_handle_assoc_response(struct net_device *dev,
7118 struct ieee80211_assoc_response *resp,
7119 struct ieee80211_network *network)
7120 {
7121 struct ipw_priv *priv = ieee80211_priv(dev);
7122 ipw_qos_association_resp(priv, network);
7123 return 0;
7124 }
7125
7126 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7127 *qos_param)
7128 {
7129 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7130 sizeof(*qos_param) * 3, qos_param);
7131 }
7132
7133 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7134 *qos_param)
7135 {
7136 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7137 qos_param);
7138 }
7139
7140 #endif /* CONFIG_IPW2200_QOS */
7141
7142 static int ipw_associate_network(struct ipw_priv *priv,
7143 struct ieee80211_network *network,
7144 struct ipw_supported_rates *rates, int roaming)
7145 {
7146 int err;
7147
7148 if (priv->config & CFG_FIXED_RATE)
7149 ipw_set_fixed_rate(priv, network->mode);
7150
7151 if (!(priv->config & CFG_STATIC_ESSID)) {
7152 priv->essid_len = min(network->ssid_len,
7153 (u8) IW_ESSID_MAX_SIZE);
7154 memcpy(priv->essid, network->ssid, priv->essid_len);
7155 }
7156
7157 network->last_associate = jiffies;
7158
7159 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7160 priv->assoc_request.channel = network->channel;
7161 priv->assoc_request.auth_key = 0;
7162
7163 if ((priv->capability & CAP_PRIVACY_ON) &&
7164 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7165 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7166 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7167
7168 if (priv->ieee->sec.level == SEC_LEVEL_1)
7169 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7170
7171 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7172 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7173 priv->assoc_request.auth_type = AUTH_LEAP;
7174 else
7175 priv->assoc_request.auth_type = AUTH_OPEN;
7176
7177 if (priv->ieee->wpa_ie_len) {
7178 priv->assoc_request.policy_support = 0x02; /* RSN active */
7179 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7180 priv->ieee->wpa_ie_len);
7181 }
7182
7183 /*
7184 * It is valid for our ieee device to support multiple modes, but
7185 * when it comes to associating to a given network we have to choose
7186 * just one mode.
7187 */
7188 if (network->mode & priv->ieee->mode & IEEE_A)
7189 priv->assoc_request.ieee_mode = IPW_A_MODE;
7190 else if (network->mode & priv->ieee->mode & IEEE_G)
7191 priv->assoc_request.ieee_mode = IPW_G_MODE;
7192 else if (network->mode & priv->ieee->mode & IEEE_B)
7193 priv->assoc_request.ieee_mode = IPW_B_MODE;
7194
7195 priv->assoc_request.capability = network->capability;
7196 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7197 && !(priv->config & CFG_PREAMBLE_LONG)) {
7198 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7199 } else {
7200 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7201
7202 /* Clear the short preamble if we won't be supporting it */
7203 priv->assoc_request.capability &=
7204 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7205 }
7206
7207 /* Clear capability bits that aren't used in Ad Hoc */
7208 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7209 priv->assoc_request.capability &=
7210 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7211
7212 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7213 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7214 roaming ? "Rea" : "A",
7215 escape_essid(priv->essid, priv->essid_len),
7216 network->channel,
7217 ipw_modes[priv->assoc_request.ieee_mode],
7218 rates->num_rates,
7219 (priv->assoc_request.preamble_length ==
7220 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7221 network->capability &
7222 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7223 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7224 priv->capability & CAP_PRIVACY_ON ?
7225 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7226 "(open)") : "",
7227 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7228 priv->capability & CAP_PRIVACY_ON ?
7229 '1' + priv->ieee->sec.active_key : '.',
7230 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7231
7232 priv->assoc_request.beacon_interval = network->beacon_interval;
7233 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7234 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7235 priv->assoc_request.assoc_type = HC_IBSS_START;
7236 priv->assoc_request.assoc_tsf_msw = 0;
7237 priv->assoc_request.assoc_tsf_lsw = 0;
7238 } else {
7239 if (unlikely(roaming))
7240 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7241 else
7242 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7243 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7244 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7245 }
7246
7247 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7248
7249 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7250 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7251 priv->assoc_request.atim_window = network->atim_window;
7252 } else {
7253 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7254 priv->assoc_request.atim_window = 0;
7255 }
7256
7257 priv->assoc_request.listen_interval = network->listen_interval;
7258
7259 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7260 if (err) {
7261 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7262 return err;
7263 }
7264
7265 rates->ieee_mode = priv->assoc_request.ieee_mode;
7266 rates->purpose = IPW_RATE_CONNECT;
7267 ipw_send_supported_rates(priv, rates);
7268
7269 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7270 priv->sys_config.dot11g_auto_detection = 1;
7271 else
7272 priv->sys_config.dot11g_auto_detection = 0;
7273
7274 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7275 priv->sys_config.answer_broadcast_ssid_probe = 1;
7276 else
7277 priv->sys_config.answer_broadcast_ssid_probe = 0;
7278
7279 err = ipw_send_system_config(priv);
7280 if (err) {
7281 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7282 return err;
7283 }
7284
7285 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7286 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7287 if (err) {
7288 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7289 return err;
7290 }
7291
7292 /*
7293 * If preemption is enabled, it is possible for the association
7294 * to complete before we return from ipw_send_associate. Therefore
7295 * we have to be sure to update our private data first.
7296 */
7297 priv->channel = network->channel;
7298 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7299 priv->status |= STATUS_ASSOCIATING;
7300 priv->status &= ~STATUS_SECURITY_UPDATED;
7301
7302 priv->assoc_network = network;
7303
7304 #ifdef CONFIG_IPW2200_QOS
7305 ipw_qos_association(priv, network);
7306 #endif
7307
7308 err = ipw_send_associate(priv, &priv->assoc_request);
7309 if (err) {
7310 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7311 return err;
7312 }
7313
7314 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7315 escape_essid(priv->essid, priv->essid_len),
7316 MAC_ARG(priv->bssid));
7317
7318 return 0;
7319 }
7320
7321 static void ipw_roam(void *data)
7322 {
7323 struct ipw_priv *priv = data;
7324 struct ieee80211_network *network = NULL;
7325 struct ipw_network_match match = {
7326 .network = priv->assoc_network
7327 };
7328
7329 /* The roaming process is as follows:
7330 *
7331 * 1. Missed beacon threshold triggers the roaming process by
7332 * setting the status ROAM bit and requesting a scan.
7333 * 2. When the scan completes, it schedules the ROAM work
7334 * 3. The ROAM work looks at all of the known networks for one that
7335 * is a better network than the currently associated. If none
7336 * found, the ROAM process is over (ROAM bit cleared)
7337 * 4. If a better network is found, a disassociation request is
7338 * sent.
7339 * 5. When the disassociation completes, the roam work is again
7340 * scheduled. The second time through, the driver is no longer
7341 * associated, and the newly selected network is sent an
7342 * association request.
7343 * 6. At this point, the roaming process is complete and the ROAM
7344 * status bit is cleared.
7345 */
7346
7347 /* If we are no longer associated, and the roaming bit is no longer
7348 * set, then we are not actively roaming, so just return */
7349 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7350 return;
7351
7352 if (priv->status & STATUS_ASSOCIATED) {
7353 /* First pass through ROAM process -- look for a better
7354 * network */
7355 unsigned long flags;
7356 u8 rssi = priv->assoc_network->stats.rssi;
7357 priv->assoc_network->stats.rssi = -128;
7358 spin_lock_irqsave(&priv->ieee->lock, flags);
7359 list_for_each_entry(network, &priv->ieee->network_list, list) {
7360 if (network != priv->assoc_network)
7361 ipw_best_network(priv, &match, network, 1);
7362 }
7363 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7364 priv->assoc_network->stats.rssi = rssi;
7365
7366 if (match.network == priv->assoc_network) {
7367 IPW_DEBUG_ASSOC("No better APs in this network to "
7368 "roam to.\n");
7369 priv->status &= ~STATUS_ROAMING;
7370 ipw_debug_config(priv);
7371 return;
7372 }
7373
7374 ipw_send_disassociate(priv, 1);
7375 priv->assoc_network = match.network;
7376
7377 return;
7378 }
7379
7380 /* Second pass through ROAM process -- request association */
7381 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7382 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7383 priv->status &= ~STATUS_ROAMING;
7384 }
7385
7386 static void ipw_bg_roam(void *data)
7387 {
7388 struct ipw_priv *priv = data;
7389 mutex_lock(&priv->mutex);
7390 ipw_roam(data);
7391 mutex_unlock(&priv->mutex);
7392 }
7393
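/*
 * Walk the list of known networks, pick the best match for the current
 * configuration and attempt to associate with it.  In ad-hoc mode a new
 * IBSS cell may be created if nothing suitable is found and ad-hoc
 * creation is enabled.  Returns 1 if an association attempt was
 * started, 0 otherwise.
 */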
7394 static int ipw_associate(void *data)
7395 {
7396 struct ipw_priv *priv = data;
7397
7398 struct ieee80211_network *network = NULL;
7399 struct ipw_network_match match = {
7400 .network = NULL
7401 };
7402 struct ipw_supported_rates *rates;
7403 struct list_head *element;
7404 unsigned long flags;
7405
7406 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7407 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7408 return 0;
7409 }
7410
7411 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7412 IPW_DEBUG_ASSOC("Not attempting association (already in "
7413 "progress)\n");
7414 return 0;
7415 }
7416
7417 if (priv->status & STATUS_DISASSOCIATING) {
7418 IPW_DEBUG_ASSOC("Not attempting association (in "
7419 "disassociating)\n ");
7420 queue_work(priv->workqueue, &priv->associate);
7421 return 0;
7422 }
7423
7424 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7425 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7426 "initialized)\n");
7427 return 0;
7428 }
7429
7430 if (!(priv->config & CFG_ASSOCIATE) &&
7431 !(priv->config & (CFG_STATIC_ESSID |
7432 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7433 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7434 return 0;
7435 }
7436
7437 /* Protect our use of the network_list */
7438 spin_lock_irqsave(&priv->ieee->lock, flags);
7439 list_for_each_entry(network, &priv->ieee->network_list, list)
7440 ipw_best_network(priv, &match, network, 0);
7441
7442 network = match.network;
7443 rates = &match.rates;
7444
7445 if (network == NULL &&
7446 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7447 priv->config & CFG_ADHOC_CREATE &&
7448 priv->config & CFG_STATIC_ESSID &&
7449 priv->config & CFG_STATIC_CHANNEL &&
7450 !list_empty(&priv->ieee->network_free_list)) {
7451 element = priv->ieee->network_free_list.next;
7452 network = list_entry(element, struct ieee80211_network, list);
7453 ipw_adhoc_create(priv, network);
7454 rates = &priv->rates;
7455 list_del(element);
7456 list_add_tail(&network->list, &priv->ieee->network_list);
7457 }
7458 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7459
7460 /* If we reached the end of the list, then we don't have any valid
7461 * matching APs */
7462 if (!network) {
7463 ipw_debug_config(priv);
7464
7465 if (!(priv->status & STATUS_SCANNING)) {
7466 if (!(priv->config & CFG_SPEED_SCAN))
7467 queue_delayed_work(priv->workqueue,
7468 &priv->request_scan,
7469 SCAN_INTERVAL);
7470 else
7471 queue_work(priv->workqueue,
7472 &priv->request_scan);
7473 }
7474
7475 return 0;
7476 }
7477
7478 ipw_associate_network(priv, network, rates, 0);
7479
7480 return 1;
7481 }
7482
7483 static void ipw_bg_associate(void *data)
7484 {
7485 struct ipw_priv *priv = data;
7486 mutex_lock(&priv->mutex);
7487 ipw_associate(data);
7488 mutex_unlock(&priv->mutex);
7489 }
7490
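/*
 * Frames decrypted by the hardware still carry the PROTECTED bit and
 * the security header/trailer (CCMP header and MIC, or WEP IV and ICV).
 * Strip them so the frame looks like a normal cleartext 802.11 frame
 * before it is handed to the ieee80211 layer.
 */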
7491 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7492 struct sk_buff *skb)
7493 {
7494 struct ieee80211_hdr *hdr;
7495 u16 fc;
7496
7497 hdr = (struct ieee80211_hdr *)skb->data;
7498 fc = le16_to_cpu(hdr->frame_ctl);
7499 if (!(fc & IEEE80211_FCTL_PROTECTED))
7500 return;
7501
7502 fc &= ~IEEE80211_FCTL_PROTECTED;
7503 hdr->frame_ctl = cpu_to_le16(fc);
7504 switch (priv->ieee->sec.level) {
7505 case SEC_LEVEL_3:
7506 /* Remove CCMP HDR */
7507 memmove(skb->data + IEEE80211_3ADDR_LEN,
7508 skb->data + IEEE80211_3ADDR_LEN + 8,
7509 skb->len - IEEE80211_3ADDR_LEN - 8);
7510 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7511 break;
7512 case SEC_LEVEL_2:
7513 break;
7514 case SEC_LEVEL_1:
7515 /* Remove IV */
7516 memmove(skb->data + IEEE80211_3ADDR_LEN,
7517 skb->data + IEEE80211_3ADDR_LEN + 4,
7518 skb->len - IEEE80211_3ADDR_LEN - 4);
7519 skb_trim(skb, skb->len - 8); /* IV + ICV */
7520 break;
7521 case SEC_LEVEL_0:
7522 break;
7523 default:
7524 		printk(KERN_ERR "Unknown security level %d\n",
7525 priv->ieee->sec.level);
7526 break;
7527 }
7528 }
7529
7530 static void ipw_handle_data_packet(struct ipw_priv *priv,
7531 struct ipw_rx_mem_buffer *rxb,
7532 struct ieee80211_rx_stats *stats)
7533 {
7534 struct ieee80211_hdr_4addr *hdr;
7535 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7536
7537 /* We received data from the HW, so stop the watchdog */
7538 priv->net_dev->trans_start = jiffies;
7539
7540 	/* Sanity check the frame length; we only process data
7541 	 * packets if the interface is open */
7542 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7543 skb_tailroom(rxb->skb))) {
7544 priv->ieee->stats.rx_errors++;
7545 priv->wstats.discard.misc++;
7546 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7547 return;
7548 } else if (unlikely(!netif_running(priv->net_dev))) {
7549 priv->ieee->stats.rx_dropped++;
7550 priv->wstats.discard.misc++;
7551 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7552 return;
7553 }
7554
7555 /* Advance skb->data to the start of the actual payload */
7556 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7557
7558 /* Set the size of the skb to the size of the frame */
7559 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7560
7561 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7562
7563 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7564 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7565 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7566 (is_multicast_ether_addr(hdr->addr1) ?
7567 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7568 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7569
7570 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7571 priv->ieee->stats.rx_errors++;
7572 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7573 rxb->skb = NULL;
7574 __ipw_led_activity_on(priv);
7575 }
7576 }
7577
7578 #ifdef CONFIG_IPW2200_RADIOTAP
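/* Monitor-mode receive path: prepend a fixed-size radiotap header
 * (struct ipw_rt_hdr) describing channel, rate, signal and antenna to the
 * raw 802.11 frame before handing it to the ieee80211 stack. */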
7579 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7580 struct ipw_rx_mem_buffer *rxb,
7581 struct ieee80211_rx_stats *stats)
7582 {
7583 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7584 struct ipw_rx_frame *frame = &pkt->u.frame;
7585
7586 /* initial pull of some data */
7587 u16 received_channel = frame->received_channel;
7588 u8 antennaAndPhy = frame->antennaAndPhy;
7589 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7590 u16 pktrate = frame->rate;
7591
7592 	/* Fixed struct that slots straight into the radiotap header -- cheaper
7593 	 * than building the header element by element. The field order must
7594 	 * match the order of the it_present bits set below (ORDER MATTERS HERE) */
7595 struct ipw_rt_hdr *ipw_rt;
7596
7597 short len = le16_to_cpu(pkt->u.frame.length);
7598
7599 /* We received data from the HW, so stop the watchdog */
7600 priv->net_dev->trans_start = jiffies;
7601
7602 	/* Sanity check the frame length; we only process data
7603 	 * packets if the interface is open */
7604 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7605 skb_tailroom(rxb->skb))) {
7606 priv->ieee->stats.rx_errors++;
7607 priv->wstats.discard.misc++;
7608 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7609 return;
7610 } else if (unlikely(!netif_running(priv->net_dev))) {
7611 priv->ieee->stats.rx_dropped++;
7612 priv->wstats.discard.misc++;
7613 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7614 return;
7615 }
7616
7617 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7618 * that now */
7619 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7620 /* FIXME: Should alloc bigger skb instead */
7621 priv->ieee->stats.rx_dropped++;
7622 priv->wstats.discard.misc++;
7623 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7624 return;
7625 }
7626
7627 /* copy the frame itself */
7628 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7629 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7630
7631 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7632 * part of our real header, saves a little time.
7633 *
7634 * No longer necessary since we fill in all our data. Purge before merging
7635 * patch officially.
7636 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7637 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7638 */
7639
7640 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7641
7642 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7643 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7644 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7645
7646 /* Big bitfield of all the fields we provide in radiotap */
7647 ipw_rt->rt_hdr.it_present =
7648 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7649 (1 << IEEE80211_RADIOTAP_TSFT) |
7650 (1 << IEEE80211_RADIOTAP_RATE) |
7651 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7652 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7653 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7654 (1 << IEEE80211_RADIOTAP_ANTENNA));
7655
7656 /* Zero the flags, we'll add to them as we go */
7657 ipw_rt->rt_flags = 0;
7658
7659 /* Convert signal to DBM */
7660 ipw_rt->rt_dbmsignal = antsignal;
7661
7662 /* Convert the channel data and set the flags */
7663 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7664 if (received_channel > 14) { /* 802.11a */
7665 ipw_rt->rt_chbitmask =
7666 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7667 } else if (antennaAndPhy & 32) { /* 802.11b */
7668 ipw_rt->rt_chbitmask =
7669 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7670 } else { /* 802.11g */
7671 ipw_rt->rt_chbitmask =
7672 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7673 }
7674
7675 /* set the rate in multiples of 500k/s */
7676 switch (pktrate) {
7677 case IPW_TX_RATE_1MB:
7678 ipw_rt->rt_rate = 2;
7679 break;
7680 case IPW_TX_RATE_2MB:
7681 ipw_rt->rt_rate = 4;
7682 break;
7683 case IPW_TX_RATE_5MB:
7684 ipw_rt->rt_rate = 10;
7685 break;
7686 case IPW_TX_RATE_6MB:
7687 ipw_rt->rt_rate = 12;
7688 break;
7689 case IPW_TX_RATE_9MB:
7690 ipw_rt->rt_rate = 18;
7691 break;
7692 case IPW_TX_RATE_11MB:
7693 ipw_rt->rt_rate = 22;
7694 break;
7695 case IPW_TX_RATE_12MB:
7696 ipw_rt->rt_rate = 24;
7697 break;
7698 case IPW_TX_RATE_18MB:
7699 ipw_rt->rt_rate = 36;
7700 break;
7701 case IPW_TX_RATE_24MB:
7702 ipw_rt->rt_rate = 48;
7703 break;
7704 case IPW_TX_RATE_36MB:
7705 ipw_rt->rt_rate = 72;
7706 break;
7707 case IPW_TX_RATE_48MB:
7708 ipw_rt->rt_rate = 96;
7709 break;
7710 case IPW_TX_RATE_54MB:
7711 ipw_rt->rt_rate = 108;
7712 break;
7713 default:
7714 ipw_rt->rt_rate = 0;
7715 break;
7716 }
7717
7718 /* antenna number */
7719 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7720
7721 /* set the preamble flag if we have it */
7722 if ((antennaAndPhy & 64))
7723 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7724
7725 /* Set the size of the skb to the size of the frame */
7726 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7727
7728 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7729
7730 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7731 priv->ieee->stats.rx_errors++;
7732 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7733 rxb->skb = NULL;
7734 /* no LED during capture */
7735 }
7736 }
7737 #endif
7738
7739 #ifdef CONFIG_IPW2200_PROMISCUOUS
7740 #define ieee80211_is_probe_response(fc) \
7741 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7742 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7743
7744 #define ieee80211_is_management(fc) \
7745 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7746
7747 #define ieee80211_is_control(fc) \
7748 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7749
7750 #define ieee80211_is_data(fc) \
7751 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7752
7753 #define ieee80211_is_assoc_request(fc) \
7754 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7755
7756 #define ieee80211_is_reassoc_request(fc) \
7757 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7758
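/* Promiscuous (rtap) receive path: hand a radiotap-prefixed copy of every
 * received frame to the rtap interface. The prom_priv->filter bits
 * (IPW_PROM_NO_* / IPW_PROM_*_HEADER_ONLY) select which frame classes are
 * delivered and whether only the 802.11 header is kept. */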
7759 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7760 struct ipw_rx_mem_buffer *rxb,
7761 struct ieee80211_rx_stats *stats)
7762 {
7763 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7764 struct ipw_rx_frame *frame = &pkt->u.frame;
7765 struct ipw_rt_hdr *ipw_rt;
7766
7767 /* First cache any information we need before we overwrite
7768 * the information provided in the skb from the hardware */
7769 struct ieee80211_hdr *hdr;
7770 u16 channel = frame->received_channel;
7771 u8 phy_flags = frame->antennaAndPhy;
7772 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7773 s8 noise = frame->noise;
7774 u8 rate = frame->rate;
7775 short len = le16_to_cpu(pkt->u.frame.length);
7776 u64 tsf = 0;
7777 struct sk_buff *skb;
7778 int hdr_only = 0;
7779 u16 filter = priv->prom_priv->filter;
7780
7781 /* If the filter is set to not include Rx frames then return */
7782 if (filter & IPW_PROM_NO_RX)
7783 return;
7784
7785 /* We received data from the HW, so stop the watchdog */
7786 priv->prom_net_dev->trans_start = jiffies;
7787
7788 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7789 priv->prom_priv->ieee->stats.rx_errors++;
7790 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7791 return;
7792 }
7793
7794 /* We only process data packets if the interface is open */
7795 if (unlikely(!netif_running(priv->prom_net_dev))) {
7796 priv->prom_priv->ieee->stats.rx_dropped++;
7797 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7798 return;
7799 }
7800
7801 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7802 * that now */
7803 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7804 /* FIXME: Should alloc bigger skb instead */
7805 priv->prom_priv->ieee->stats.rx_dropped++;
7806 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7807 return;
7808 }
7809
7810 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7811 if (ieee80211_is_management(hdr->frame_ctl)) {
7812 if (filter & IPW_PROM_NO_MGMT)
7813 return;
7814 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7815 hdr_only = 1;
7816 } else if (ieee80211_is_control(hdr->frame_ctl)) {
7817 if (filter & IPW_PROM_NO_CTL)
7818 return;
7819 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7820 hdr_only = 1;
7821 } else if (ieee80211_is_data(hdr->frame_ctl)) {
7822 if (filter & IPW_PROM_NO_DATA)
7823 return;
7824 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7825 hdr_only = 1;
7826 }
7827
7828 /* Copy the SKB since this is for the promiscuous side */
7829 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7830 if (skb == NULL) {
7831 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7832 return;
7833 }
7834
7835 	/* The radiotap header sits at the front of the copy; the 802.11 frame
	 * is copied in after it, into ipw_rt->payload */
7836 ipw_rt = (void *)skb->data;
7837
7838 if (hdr_only)
7839 len = ieee80211_get_hdrlen(hdr->frame_ctl);
7840
7841 memcpy(ipw_rt->payload, hdr, len);
7842
7843 /* Zero the radiotap static buffer ... We only need to zero the bytes
7844 * NOT part of our real header, saves a little time.
7845 *
7846 * No longer necessary since we fill in all our data. Purge before
7847 * merging patch officially.
7848 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7849 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7850 */
7851
7852 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7853 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7854 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7855
7856 /* Set the size of the skb to the size of the frame */
7857 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7858
7859 /* Big bitfield of all the fields we provide in radiotap */
7860 ipw_rt->rt_hdr.it_present =
7861 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7862 (1 << IEEE80211_RADIOTAP_TSFT) |
7863 (1 << IEEE80211_RADIOTAP_RATE) |
7864 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7865 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7866 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7867 (1 << IEEE80211_RADIOTAP_ANTENNA));
7868
7869 /* Zero the flags, we'll add to them as we go */
7870 ipw_rt->rt_flags = 0;
7871
7872 ipw_rt->rt_tsf = tsf;
7873
7874 /* Convert to DBM */
7875 ipw_rt->rt_dbmsignal = signal;
7876 ipw_rt->rt_dbmnoise = noise;
7877
7878 /* Convert the channel data and set the flags */
7879 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7880 if (channel > 14) { /* 802.11a */
7881 ipw_rt->rt_chbitmask =
7882 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7883 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7884 ipw_rt->rt_chbitmask =
7885 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7886 } else { /* 802.11g */
7887 ipw_rt->rt_chbitmask =
7888 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7889 }
7890
7891 /* set the rate in multiples of 500k/s */
7892 switch (rate) {
7893 case IPW_TX_RATE_1MB:
7894 ipw_rt->rt_rate = 2;
7895 break;
7896 case IPW_TX_RATE_2MB:
7897 ipw_rt->rt_rate = 4;
7898 break;
7899 case IPW_TX_RATE_5MB:
7900 ipw_rt->rt_rate = 10;
7901 break;
7902 case IPW_TX_RATE_6MB:
7903 ipw_rt->rt_rate = 12;
7904 break;
7905 case IPW_TX_RATE_9MB:
7906 ipw_rt->rt_rate = 18;
7907 break;
7908 case IPW_TX_RATE_11MB:
7909 ipw_rt->rt_rate = 22;
7910 break;
7911 case IPW_TX_RATE_12MB:
7912 ipw_rt->rt_rate = 24;
7913 break;
7914 case IPW_TX_RATE_18MB:
7915 ipw_rt->rt_rate = 36;
7916 break;
7917 case IPW_TX_RATE_24MB:
7918 ipw_rt->rt_rate = 48;
7919 break;
7920 case IPW_TX_RATE_36MB:
7921 ipw_rt->rt_rate = 72;
7922 break;
7923 case IPW_TX_RATE_48MB:
7924 ipw_rt->rt_rate = 96;
7925 break;
7926 case IPW_TX_RATE_54MB:
7927 ipw_rt->rt_rate = 108;
7928 break;
7929 default:
7930 ipw_rt->rt_rate = 0;
7931 break;
7932 }
7933
7934 /* antenna number */
7935 ipw_rt->rt_antenna = (phy_flags & 3);
7936
7937 /* set the preamble flag if we have it */
7938 if (phy_flags & (1 << 6))
7939 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7940
7941 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
7942
7943 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
7944 priv->prom_priv->ieee->stats.rx_errors++;
7945 dev_kfree_skb_any(skb);
7946 }
7947 }
7948 #endif
7949
7950 static int is_network_packet(struct ipw_priv *priv,
7951 struct ieee80211_hdr_4addr *header)
7952 {
7953 	/* Filter incoming packets to determine if they are targeted toward
7954 * this network, discarding packets coming from ourselves */
7955 switch (priv->ieee->iw_mode) {
7956 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7957 /* packets from our adapter are dropped (echo) */
7958 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7959 return 0;
7960
7961 /* {broad,multi}cast packets to our BSSID go through */
7962 if (is_multicast_ether_addr(header->addr1))
7963 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7964
7965 /* packets to our adapter go through */
7966 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7967 ETH_ALEN);
7968
7969 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7970 /* packets from our adapter are dropped (echo) */
7971 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7972 return 0;
7973
7974 /* {broad,multi}cast packets to our BSS go through */
7975 if (is_multicast_ether_addr(header->addr1))
7976 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7977
7978 /* packets to our adapter go through */
7979 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7980 ETH_ALEN);
7981 }
7982
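	/* Any other mode: accept the frame. */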
7983 return 1;
7984 }
7985
7986 #define IPW_PACKET_RETRY_TIME HZ
7987
7988 static int is_duplicate_packet(struct ipw_priv *priv,
7989 struct ieee80211_hdr_4addr *header)
7990 {
7991 u16 sc = le16_to_cpu(header->seq_ctl);
7992 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7993 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7994 u16 *last_seq, *last_frag;
7995 unsigned long *last_time;
7996
7997 switch (priv->ieee->iw_mode) {
7998 case IW_MODE_ADHOC:
7999 {
8000 struct list_head *p;
8001 struct ipw_ibss_seq *entry = NULL;
8002 u8 *mac = header->addr2;
8003 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8004
8005 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8006 entry =
8007 list_entry(p, struct ipw_ibss_seq, list);
8008 if (!memcmp(entry->mac, mac, ETH_ALEN))
8009 break;
8010 }
8011 if (p == &priv->ibss_mac_hash[index]) {
8012 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8013 if (!entry) {
8014 IPW_ERROR
8015 ("Cannot malloc new mac entry\n");
8016 return 0;
8017 }
8018 memcpy(entry->mac, mac, ETH_ALEN);
8019 entry->seq_num = seq;
8020 entry->frag_num = frag;
8021 entry->packet_time = jiffies;
8022 list_add(&entry->list,
8023 &priv->ibss_mac_hash[index]);
8024 return 0;
8025 }
8026 last_seq = &entry->seq_num;
8027 last_frag = &entry->frag_num;
8028 last_time = &entry->packet_time;
8029 break;
8030 }
8031 case IW_MODE_INFRA:
8032 last_seq = &priv->last_seq_num;
8033 last_frag = &priv->last_frag_num;
8034 last_time = &priv->last_packet_time;
8035 break;
8036 default:
8037 return 0;
8038 }
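	/* A frame is considered a duplicate if it repeats the last sequence
	 * number seen from this source within IPW_PACKET_RETRY_TIME and its
	 * fragment number is not the next expected one (i.e. it is repeated
	 * or out of order). */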
8039 if ((*last_seq == seq) &&
8040 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8041 if (*last_frag == frag)
8042 goto drop;
8043 if (*last_frag + 1 != frag)
8044 /* out-of-order fragment */
8045 goto drop;
8046 } else
8047 *last_seq = seq;
8048
8049 *last_frag = frag;
8050 *last_time = jiffies;
8051 return 0;
8052
8053 drop:
8054       /* This check is commented out because the card has been observed
8055        * receiving duplicate packets with the FCTL_RETRY bit not set, in
8056        * IBSS mode with fragmentation enabled.
8057 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8058 return 1;
8059 }
8060
8061 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8062 struct ipw_rx_mem_buffer *rxb,
8063 struct ieee80211_rx_stats *stats)
8064 {
8065 struct sk_buff *skb = rxb->skb;
8066 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8067 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8068 (skb->data + IPW_RX_FRAME_SIZE);
8069
8070 ieee80211_rx_mgt(priv->ieee, header, stats);
8071
8072 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8073 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8074 IEEE80211_STYPE_PROBE_RESP) ||
8075 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8076 IEEE80211_STYPE_BEACON))) {
8077 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8078 ipw_add_station(priv, header->addr2);
8079 }
8080
8081 if (priv->config & CFG_NET_STATS) {
8082 IPW_DEBUG_HC("sending stat packet\n");
8083
8084 /* Set the size of the skb to the size of the full
8085 * ipw header and 802.11 frame */
8086 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8087 IPW_RX_FRAME_SIZE);
8088
8089 /* Advance past the ipw packet header to the 802.11 frame */
8090 skb_pull(skb, IPW_RX_FRAME_SIZE);
8091
8092 /* Push the ieee80211_rx_stats before the 802.11 frame */
8093 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8094
8095 skb->dev = priv->ieee->dev;
8096
8097 /* Point raw at the ieee80211_stats */
8098 skb->mac.raw = skb->data;
8099
8100 skb->pkt_type = PACKET_OTHERHOST;
8101 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8102 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8103 netif_rx(skb);
8104 rxb->skb = NULL;
8105 }
8106 }
8107
8108 /*
8109  * Main entry function for receiving a packet with 802.11 headers. This
8110  * should be called whenever the FW has notified us that there is a new
8111  * skb in the receive queue.
8112 */
8113 static void ipw_rx(struct ipw_priv *priv)
8114 {
8115 struct ipw_rx_mem_buffer *rxb;
8116 struct ipw_rx_packet *pkt;
8117 struct ieee80211_hdr_4addr *header;
8118 u32 r, w, i;
8119 u8 network_packet;
8120
8121 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8122 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8123 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8124
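	/* Walk the Rx ring from the entry after the last one we processed
	 * until we catch up with the read index reported by the hardware. */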
8125 while (i != r) {
8126 rxb = priv->rxq->queue[i];
8127 if (unlikely(rxb == NULL)) {
8128 printk(KERN_CRIT "Queue not allocated!\n");
8129 break;
8130 }
8131 priv->rxq->queue[i] = NULL;
8132
8133 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8134 IPW_RX_BUF_SIZE,
8135 PCI_DMA_FROMDEVICE);
8136
8137 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8138 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8139 pkt->header.message_type,
8140 pkt->header.rx_seq_num, pkt->header.control_bits);
8141
8142 switch (pkt->header.message_type) {
8143 case RX_FRAME_TYPE: /* 802.11 frame */ {
8144 struct ieee80211_rx_stats stats = {
8145 .rssi =
8146 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8147 IPW_RSSI_TO_DBM,
8148 .signal =
8149 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8150 IPW_RSSI_TO_DBM + 0x100,
8151 .noise =
8152 le16_to_cpu(pkt->u.frame.noise),
8153 .rate = pkt->u.frame.rate,
8154 .mac_time = jiffies,
8155 .received_channel =
8156 pkt->u.frame.received_channel,
8157 .freq =
8158 (pkt->u.frame.
8159 control & (1 << 0)) ?
8160 IEEE80211_24GHZ_BAND :
8161 IEEE80211_52GHZ_BAND,
8162 .len = le16_to_cpu(pkt->u.frame.length),
8163 };
8164
8165 if (stats.rssi != 0)
8166 stats.mask |= IEEE80211_STATMASK_RSSI;
8167 if (stats.signal != 0)
8168 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8169 if (stats.noise != 0)
8170 stats.mask |= IEEE80211_STATMASK_NOISE;
8171 if (stats.rate != 0)
8172 stats.mask |= IEEE80211_STATMASK_RATE;
8173
8174 priv->rx_packets++;
8175
8176 #ifdef CONFIG_IPW2200_PROMISCUOUS
8177 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8178 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8179 #endif
8180
8181 #ifdef CONFIG_IPW2200_MONITOR
8182 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8183 #ifdef CONFIG_IPW2200_RADIOTAP
8184
8185 ipw_handle_data_packet_monitor(priv,
8186 rxb,
8187 &stats);
8188 #else
8189 ipw_handle_data_packet(priv, rxb,
8190 &stats);
8191 #endif
8192 break;
8193 }
8194 #endif
8195
8196 header =
8197 (struct ieee80211_hdr_4addr *)(rxb->skb->
8198 data +
8199 IPW_RX_FRAME_SIZE);
8200 /* TODO: Check Ad-Hoc dest/source and make sure
8201 * that we are actually parsing these packets
8202 * correctly -- we should probably use the
8203 * frame control of the packet and disregard
8204 * the current iw_mode */
8205
8206 network_packet =
8207 is_network_packet(priv, header);
8208 if (network_packet && priv->assoc_network) {
8209 priv->assoc_network->stats.rssi =
8210 stats.rssi;
8211 priv->exp_avg_rssi =
8212 exponential_average(priv->exp_avg_rssi,
8213 stats.rssi, DEPTH_RSSI);
8214 }
8215
8216 IPW_DEBUG_RX("Frame: len=%u\n",
8217 le16_to_cpu(pkt->u.frame.length));
8218
8219 if (le16_to_cpu(pkt->u.frame.length) <
8220 ieee80211_get_hdrlen(le16_to_cpu(
8221 header->frame_ctl))) {
8222 IPW_DEBUG_DROP
8223 ("Received packet is too small. "
8224 "Dropping.\n");
8225 priv->ieee->stats.rx_errors++;
8226 priv->wstats.discard.misc++;
8227 break;
8228 }
8229
8230 switch (WLAN_FC_GET_TYPE
8231 (le16_to_cpu(header->frame_ctl))) {
8232
8233 case IEEE80211_FTYPE_MGMT:
8234 ipw_handle_mgmt_packet(priv, rxb,
8235 &stats);
8236 break;
8237
8238 case IEEE80211_FTYPE_CTL:
8239 break;
8240
8241 case IEEE80211_FTYPE_DATA:
8242 if (unlikely(!network_packet ||
8243 is_duplicate_packet(priv,
8244 header)))
8245 {
8246 IPW_DEBUG_DROP("Dropping: "
8247 MAC_FMT ", "
8248 MAC_FMT ", "
8249 MAC_FMT "\n",
8250 MAC_ARG(header->
8251 addr1),
8252 MAC_ARG(header->
8253 addr2),
8254 MAC_ARG(header->
8255 addr3));
8256 break;
8257 }
8258
8259 ipw_handle_data_packet(priv, rxb,
8260 &stats);
8261
8262 break;
8263 }
8264 break;
8265 }
8266
8267 case RX_HOST_NOTIFICATION_TYPE:{
8268 IPW_DEBUG_RX
8269 ("Notification: subtype=%02X flags=%02X size=%d\n",
8270 pkt->u.notification.subtype,
8271 pkt->u.notification.flags,
8272 pkt->u.notification.size);
8273 ipw_rx_notification(priv, &pkt->u.notification);
8274 break;
8275 }
8276
8277 default:
8278 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8279 pkt->header.message_type);
8280 break;
8281 }
8282
8283 /* For now we just don't re-use anything. We can tweak this
8284 * later to try and re-use notification packets and SKBs that
8285 * fail to Rx correctly */
8286 if (rxb->skb != NULL) {
8287 dev_kfree_skb_any(rxb->skb);
8288 rxb->skb = NULL;
8289 }
8290
8291 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8292 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8293 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8294
8295 i = (i + 1) % RX_QUEUE_SIZE;
8296 }
8297
8298 /* Backtrack one entry */
8299 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8300
8301 ipw_rx_queue_restock(priv);
8302 }
8303
8304 #define DEFAULT_RTS_THRESHOLD 2304U
8305 #define MIN_RTS_THRESHOLD 1U
8306 #define MAX_RTS_THRESHOLD 2304U
8307 #define DEFAULT_BEACON_INTERVAL 100U
8308 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8309 #define DEFAULT_LONG_RETRY_LIMIT 4U
8310
8311 /**
8312 * ipw_sw_reset
8313 * @option: options to control different reset behaviour
8314 * 0 = reset everything except the 'disable' module_param
8315 * 1 = reset everything and print out driver info (for probe only)
8316 * 2 = reset everything
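 *
 * Returns 1 if the wireless mode is left unchanged by the reset, 0 otherwise.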
8317 */
8318 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8319 {
8320 int band, modulation;
8321 int old_mode = priv->ieee->iw_mode;
8322
8323 /* Initialize module parameter values here */
8324 priv->config = 0;
8325
8326 /* We default to disabling the LED code as right now it causes
8327 * too many systems to lock up... */
8328 if (!led)
8329 priv->config |= CFG_NO_LED;
8330
8331 if (associate)
8332 priv->config |= CFG_ASSOCIATE;
8333 else
8334 IPW_DEBUG_INFO("Auto associate disabled.\n");
8335
8336 if (auto_create)
8337 priv->config |= CFG_ADHOC_CREATE;
8338 else
8339 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8340
8341 priv->config &= ~CFG_STATIC_ESSID;
8342 priv->essid_len = 0;
8343 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8344
8345 if (disable && option) {
8346 priv->status |= STATUS_RF_KILL_SW;
8347 IPW_DEBUG_INFO("Radio disabled.\n");
8348 }
8349
8350 if (channel != 0) {
8351 priv->config |= CFG_STATIC_CHANNEL;
8352 priv->channel = channel;
8353 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8354 /* TODO: Validate that provided channel is in range */
8355 }
8356 #ifdef CONFIG_IPW2200_QOS
8357 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8358 burst_duration_CCK, burst_duration_OFDM);
8359 #endif /* CONFIG_IPW2200_QOS */
8360
8361 switch (mode) {
8362 case 1:
8363 priv->ieee->iw_mode = IW_MODE_ADHOC;
8364 priv->net_dev->type = ARPHRD_ETHER;
8365
8366 break;
8367 #ifdef CONFIG_IPW2200_MONITOR
8368 case 2:
8369 priv->ieee->iw_mode = IW_MODE_MONITOR;
8370 #ifdef CONFIG_IPW2200_RADIOTAP
8371 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8372 #else
8373 priv->net_dev->type = ARPHRD_IEEE80211;
8374 #endif
8375 break;
8376 #endif
8377 default:
8378 case 0:
8379 priv->net_dev->type = ARPHRD_ETHER;
8380 priv->ieee->iw_mode = IW_MODE_INFRA;
8381 break;
8382 }
8383
8384 if (hwcrypto) {
8385 priv->ieee->host_encrypt = 0;
8386 priv->ieee->host_encrypt_msdu = 0;
8387 priv->ieee->host_decrypt = 0;
8388 priv->ieee->host_mc_decrypt = 0;
8389 }
8390 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8391
8392 	/* The IPW2200/2915 is able to do hardware fragmentation. */
8393 priv->ieee->host_open_frag = 0;
8394
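	/* PCI device IDs 0x4223 and 0x4224 identify the 2915ABG (a/b/g
	 * capable); any other supported ID is treated as a 2200BG (b/g). */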
8395 if ((priv->pci_dev->device == 0x4223) ||
8396 (priv->pci_dev->device == 0x4224)) {
8397 if (option == 1)
8398 printk(KERN_INFO DRV_NAME
8399 ": Detected Intel PRO/Wireless 2915ABG Network "
8400 "Connection\n");
8401 priv->ieee->abg_true = 1;
8402 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8403 modulation = IEEE80211_OFDM_MODULATION |
8404 IEEE80211_CCK_MODULATION;
8405 priv->adapter = IPW_2915ABG;
8406 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8407 } else {
8408 if (option == 1)
8409 printk(KERN_INFO DRV_NAME
8410 ": Detected Intel PRO/Wireless 2200BG Network "
8411 "Connection\n");
8412
8413 priv->ieee->abg_true = 0;
8414 band = IEEE80211_24GHZ_BAND;
8415 modulation = IEEE80211_OFDM_MODULATION |
8416 IEEE80211_CCK_MODULATION;
8417 priv->adapter = IPW_2200BG;
8418 priv->ieee->mode = IEEE_G | IEEE_B;
8419 }
8420
8421 priv->ieee->freq_band = band;
8422 priv->ieee->modulation = modulation;
8423
8424 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8425
8426 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8427 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8428
8429 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8430 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8431 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8432
8433 /* If power management is turned on, default to AC mode */
8434 priv->power_mode = IPW_POWER_AC;
8435 priv->tx_power = IPW_TX_POWER_DEFAULT;
8436
8437 return old_mode == priv->ieee->iw_mode;
8438 }
8439
8440 /*
8441 * This file defines the Wireless Extension handlers. It does not
8442 * define any methods of hardware manipulation and relies on the
8443 * functions defined in ipw_main to provide the HW interaction.
8444 *
8445 * The exception to this is the use of the ipw_get_ordinal()
8446  * function, used to poll the hardware rather than making unnecessary calls.
8447 *
8448 */
8449
8450 static int ipw_wx_get_name(struct net_device *dev,
8451 struct iw_request_info *info,
8452 union iwreq_data *wrqu, char *extra)
8453 {
8454 struct ipw_priv *priv = ieee80211_priv(dev);
8455 mutex_lock(&priv->mutex);
8456 if (priv->status & STATUS_RF_KILL_MASK)
8457 strcpy(wrqu->name, "radio off");
8458 else if (!(priv->status & STATUS_ASSOCIATED))
8459 strcpy(wrqu->name, "unassociated");
8460 else
8461 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8462 ipw_modes[priv->assoc_request.ieee_mode]);
8463 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8464 mutex_unlock(&priv->mutex);
8465 return 0;
8466 }
8467
8468 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8469 {
8470 if (channel == 0) {
8471 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8472 priv->config &= ~CFG_STATIC_CHANNEL;
8473 IPW_DEBUG_ASSOC("Attempting to associate with new "
8474 "parameters.\n");
8475 ipw_associate(priv);
8476 return 0;
8477 }
8478
8479 priv->config |= CFG_STATIC_CHANNEL;
8480
8481 if (priv->channel == channel) {
8482 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8483 channel);
8484 return 0;
8485 }
8486
8487 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8488 priv->channel = channel;
8489
8490 #ifdef CONFIG_IPW2200_MONITOR
8491 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8492 int i;
8493 if (priv->status & STATUS_SCANNING) {
8494 IPW_DEBUG_SCAN("Scan abort triggered due to "
8495 "channel change.\n");
8496 ipw_abort_scan(priv);
8497 }
8498
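		/* Busy-wait up to 10 ms (1000 iterations of 10 us) for the
		 * scan abort to take effect. */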
8499 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8500 udelay(10);
8501
8502 if (priv->status & STATUS_SCANNING)
8503 IPW_DEBUG_SCAN("Still scanning...\n");
8504 else
8505 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8506 1000 - i);
8507
8508 return 0;
8509 }
8510 #endif /* CONFIG_IPW2200_MONITOR */
8511
8512 /* Network configuration changed -- force [re]association */
8513 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8514 if (!ipw_disassociate(priv))
8515 ipw_associate(priv);
8516
8517 return 0;
8518 }
8519
8520 static int ipw_wx_set_freq(struct net_device *dev,
8521 struct iw_request_info *info,
8522 union iwreq_data *wrqu, char *extra)
8523 {
8524 struct ipw_priv *priv = ieee80211_priv(dev);
8525 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8526 struct iw_freq *fwrq = &wrqu->freq;
8527 int ret = 0, i;
8528 u8 channel, flags;
8529 int band;
8530
8531 if (fwrq->m == 0) {
8532 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8533 mutex_lock(&priv->mutex);
8534 ret = ipw_set_channel(priv, 0);
8535 mutex_unlock(&priv->mutex);
8536 return ret;
8537 }
8538 /* if setting by freq convert to channel */
8539 if (fwrq->e == 1) {
8540 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8541 if (channel == 0)
8542 return -EINVAL;
8543 } else
8544 channel = fwrq->m;
8545
8546 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8547 return -EINVAL;
8548
8549 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8550 i = ieee80211_channel_to_index(priv->ieee, channel);
8551 if (i == -1)
8552 return -EINVAL;
8553
8554 flags = (band == IEEE80211_24GHZ_BAND) ?
8555 geo->bg[i].flags : geo->a[i].flags;
8556 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8557 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8558 return -EINVAL;
8559 }
8560 }
8561
8562 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8563 mutex_lock(&priv->mutex);
8564 ret = ipw_set_channel(priv, channel);
8565 mutex_unlock(&priv->mutex);
8566 return ret;
8567 }
8568
8569 static int ipw_wx_get_freq(struct net_device *dev,
8570 struct iw_request_info *info,
8571 union iwreq_data *wrqu, char *extra)
8572 {
8573 struct ipw_priv *priv = ieee80211_priv(dev);
8574
8575 wrqu->freq.e = 0;
8576
8577 /* If we are associated, trying to associate, or have a statically
8578 * configured CHANNEL then return that; otherwise return ANY */
8579 mutex_lock(&priv->mutex);
8580 if (priv->config & CFG_STATIC_CHANNEL ||
8581 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8582 wrqu->freq.m = priv->channel;
8583 else
8584 wrqu->freq.m = 0;
8585
8586 mutex_unlock(&priv->mutex);
8587 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8588 return 0;
8589 }
8590
8591 static int ipw_wx_set_mode(struct net_device *dev,
8592 struct iw_request_info *info,
8593 union iwreq_data *wrqu, char *extra)
8594 {
8595 struct ipw_priv *priv = ieee80211_priv(dev);
8596 int err = 0;
8597
8598 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8599
8600 switch (wrqu->mode) {
8601 #ifdef CONFIG_IPW2200_MONITOR
8602 case IW_MODE_MONITOR:
8603 #endif
8604 case IW_MODE_ADHOC:
8605 case IW_MODE_INFRA:
8606 break;
8607 case IW_MODE_AUTO:
8608 wrqu->mode = IW_MODE_INFRA;
8609 break;
8610 default:
8611 return -EINVAL;
8612 }
8613 if (wrqu->mode == priv->ieee->iw_mode)
8614 return 0;
8615
8616 mutex_lock(&priv->mutex);
8617
8618 ipw_sw_reset(priv, 0);
8619
8620 #ifdef CONFIG_IPW2200_MONITOR
8621 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8622 priv->net_dev->type = ARPHRD_ETHER;
8623
8624 if (wrqu->mode == IW_MODE_MONITOR)
8625 #ifdef CONFIG_IPW2200_RADIOTAP
8626 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8627 #else
8628 priv->net_dev->type = ARPHRD_IEEE80211;
8629 #endif
8630 #endif /* CONFIG_IPW2200_MONITOR */
8631
8632 /* Free the existing firmware and reset the fw_loaded
8633 	 * flag so ipw_load() will bring in the new firmware */
8634 free_firmware();
8635
8636 priv->ieee->iw_mode = wrqu->mode;
8637
8638 queue_work(priv->workqueue, &priv->adapter_restart);
8639 mutex_unlock(&priv->mutex);
8640 return err;
8641 }
8642
8643 static int ipw_wx_get_mode(struct net_device *dev,
8644 struct iw_request_info *info,
8645 union iwreq_data *wrqu, char *extra)
8646 {
8647 struct ipw_priv *priv = ieee80211_priv(dev);
8648 mutex_lock(&priv->mutex);
8649 wrqu->mode = priv->ieee->iw_mode;
8650 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8651 mutex_unlock(&priv->mutex);
8652 return 0;
8653 }
8654
8655 /* Values are in microseconds */
8656 static const s32 timeout_duration[] = {
8657 350000,
8658 250000,
8659 75000,
8660 37000,
8661 25000,
8662 };
8663
8664 static const s32 period_duration[] = {
8665 400000,
8666 700000,
8667 1000000,
8668 1000000,
8669 1000000
8670 };
8671
8672 static int ipw_wx_get_range(struct net_device *dev,
8673 struct iw_request_info *info,
8674 union iwreq_data *wrqu, char *extra)
8675 {
8676 struct ipw_priv *priv = ieee80211_priv(dev);
8677 struct iw_range *range = (struct iw_range *)extra;
8678 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8679 int i = 0, j;
8680
8681 wrqu->data.length = sizeof(*range);
8682 memset(range, 0, sizeof(*range));
8683
8684 	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8685 range->throughput = 27 * 1000 * 1000;
8686
8687 range->max_qual.qual = 100;
8688 /* TODO: Find real max RSSI and stick here */
8689 range->max_qual.level = 0;
8690 range->max_qual.noise = 0;
8691 range->max_qual.updated = 7; /* Updated all three */
8692
8693 range->avg_qual.qual = 70;
8694 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8695 range->avg_qual.level = 0; /* FIXME to real average level */
8696 range->avg_qual.noise = 0;
8697 range->avg_qual.updated = 7; /* Updated all three */
8698 mutex_lock(&priv->mutex);
8699 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8700
8701 for (i = 0; i < range->num_bitrates; i++)
8702 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8703 500000;
8704
8705 range->max_rts = DEFAULT_RTS_THRESHOLD;
8706 range->min_frag = MIN_FRAG_THRESHOLD;
8707 range->max_frag = MAX_FRAG_THRESHOLD;
8708
8709 range->encoding_size[0] = 5;
8710 range->encoding_size[1] = 13;
8711 range->num_encoding_sizes = 2;
8712 range->max_encoding_tokens = WEP_KEYS;
8713
8714 /* Set the Wireless Extension versions */
8715 range->we_version_compiled = WIRELESS_EXT;
8716 range->we_version_source = 18;
8717
8718 i = 0;
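	/* Report every supported channel. Wireless Extensions encode a
	 * frequency as m * 10^e, so geo frequency (MHz) * 100000 with
	 * e = 1 yields the value in Hz. */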
8719 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8720 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8721 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8722 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8723 continue;
8724
8725 range->freq[i].i = geo->bg[j].channel;
8726 range->freq[i].m = geo->bg[j].freq * 100000;
8727 range->freq[i].e = 1;
8728 i++;
8729 }
8730 }
8731
8732 if (priv->ieee->mode & IEEE_A) {
8733 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8734 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8735 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8736 continue;
8737
8738 range->freq[i].i = geo->a[j].channel;
8739 range->freq[i].m = geo->a[j].freq * 100000;
8740 range->freq[i].e = 1;
8741 i++;
8742 }
8743 }
8744
8745 range->num_channels = i;
8746 range->num_frequency = i;
8747
8748 mutex_unlock(&priv->mutex);
8749
8750 /* Event capability (kernel + driver) */
8751 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8752 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8753 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8754 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8755 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8756
8757 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8758 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8759
8760 IPW_DEBUG_WX("GET Range\n");
8761 return 0;
8762 }
8763
8764 static int ipw_wx_set_wap(struct net_device *dev,
8765 struct iw_request_info *info,
8766 union iwreq_data *wrqu, char *extra)
8767 {
8768 struct ipw_priv *priv = ieee80211_priv(dev);
8769
8770 static const unsigned char any[] = {
8771 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8772 };
8773 static const unsigned char off[] = {
8774 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8775 };
8776
8777 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8778 return -EINVAL;
8779 mutex_lock(&priv->mutex);
8780 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8781 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8782 /* we disable mandatory BSSID association */
8783 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8784 priv->config &= ~CFG_STATIC_BSSID;
8785 IPW_DEBUG_ASSOC("Attempting to associate with new "
8786 "parameters.\n");
8787 ipw_associate(priv);
8788 mutex_unlock(&priv->mutex);
8789 return 0;
8790 }
8791
8792 priv->config |= CFG_STATIC_BSSID;
8793 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8794 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8795 mutex_unlock(&priv->mutex);
8796 return 0;
8797 }
8798
8799 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8800 MAC_ARG(wrqu->ap_addr.sa_data));
8801
8802 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8803
8804 /* Network configuration changed -- force [re]association */
8805 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8806 if (!ipw_disassociate(priv))
8807 ipw_associate(priv);
8808
8809 mutex_unlock(&priv->mutex);
8810 return 0;
8811 }
8812
8813 static int ipw_wx_get_wap(struct net_device *dev,
8814 struct iw_request_info *info,
8815 union iwreq_data *wrqu, char *extra)
8816 {
8817 struct ipw_priv *priv = ieee80211_priv(dev);
8818 /* If we are associated, trying to associate, or have a statically
8819 * configured BSSID then return that; otherwise return ANY */
8820 mutex_lock(&priv->mutex);
8821 if (priv->config & CFG_STATIC_BSSID ||
8822 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8823 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8824 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8825 } else
8826 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8827
8828 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8829 MAC_ARG(wrqu->ap_addr.sa_data));
8830 mutex_unlock(&priv->mutex);
8831 return 0;
8832 }
8833
8834 static int ipw_wx_set_essid(struct net_device *dev,
8835 struct iw_request_info *info,
8836 union iwreq_data *wrqu, char *extra)
8837 {
8838 struct ipw_priv *priv = ieee80211_priv(dev);
8839 char *essid = ""; /* ANY */
8840 int length = 0;
8841 mutex_lock(&priv->mutex);
8842 if (wrqu->essid.flags && wrqu->essid.length) {
8843 length = wrqu->essid.length - 1;
8844 essid = extra;
8845 }
8846 if (length == 0) {
8847 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8848 if ((priv->config & CFG_STATIC_ESSID) &&
8849 !(priv->status & (STATUS_ASSOCIATED |
8850 STATUS_ASSOCIATING))) {
8851 IPW_DEBUG_ASSOC("Attempting to associate with new "
8852 "parameters.\n");
8853 priv->config &= ~CFG_STATIC_ESSID;
8854 ipw_associate(priv);
8855 }
8856 mutex_unlock(&priv->mutex);
8857 return 0;
8858 }
8859
8860 length = min(length, IW_ESSID_MAX_SIZE);
8861
8862 priv->config |= CFG_STATIC_ESSID;
8863
8864 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8865 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8866 mutex_unlock(&priv->mutex);
8867 return 0;
8868 }
8869
8870 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8871 length);
8872
8873 priv->essid_len = length;
8874 memcpy(priv->essid, essid, priv->essid_len);
8875
8876 /* Network configuration changed -- force [re]association */
8877 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8878 if (!ipw_disassociate(priv))
8879 ipw_associate(priv);
8880
8881 mutex_unlock(&priv->mutex);
8882 return 0;
8883 }
8884
8885 static int ipw_wx_get_essid(struct net_device *dev,
8886 struct iw_request_info *info,
8887 union iwreq_data *wrqu, char *extra)
8888 {
8889 struct ipw_priv *priv = ieee80211_priv(dev);
8890
8891 /* If we are associated, trying to associate, or have a statically
8892 * configured ESSID then return that; otherwise return ANY */
8893 mutex_lock(&priv->mutex);
8894 if (priv->config & CFG_STATIC_ESSID ||
8895 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8896 IPW_DEBUG_WX("Getting essid: '%s'\n",
8897 escape_essid(priv->essid, priv->essid_len));
8898 memcpy(extra, priv->essid, priv->essid_len);
8899 wrqu->essid.length = priv->essid_len;
8900 wrqu->essid.flags = 1; /* active */
8901 } else {
8902 IPW_DEBUG_WX("Getting essid: ANY\n");
8903 wrqu->essid.length = 0;
8904 wrqu->essid.flags = 0; /* active */
8905 }
8906 mutex_unlock(&priv->mutex);
8907 return 0;
8908 }
8909
8910 static int ipw_wx_set_nick(struct net_device *dev,
8911 struct iw_request_info *info,
8912 union iwreq_data *wrqu, char *extra)
8913 {
8914 struct ipw_priv *priv = ieee80211_priv(dev);
8915
8916 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8917 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8918 return -E2BIG;
8919 mutex_lock(&priv->mutex);
8920 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8921 memset(priv->nick, 0, sizeof(priv->nick));
8922 memcpy(priv->nick, extra, wrqu->data.length);
8923 IPW_DEBUG_TRACE("<<\n");
8924 mutex_unlock(&priv->mutex);
8925 return 0;
8926
8927 }
8928
8929 static int ipw_wx_get_nick(struct net_device *dev,
8930 struct iw_request_info *info,
8931 union iwreq_data *wrqu, char *extra)
8932 {
8933 struct ipw_priv *priv = ieee80211_priv(dev);
8934 IPW_DEBUG_WX("Getting nick\n");
8935 mutex_lock(&priv->mutex);
8936 wrqu->data.length = strlen(priv->nick) + 1;
8937 memcpy(extra, priv->nick, wrqu->data.length);
8938 wrqu->data.flags = 1; /* active */
8939 mutex_unlock(&priv->mutex);
8940 return 0;
8941 }
8942
8943 static int ipw_wx_set_sens(struct net_device *dev,
8944 struct iw_request_info *info,
8945 union iwreq_data *wrqu, char *extra)
8946 {
8947 struct ipw_priv *priv = ieee80211_priv(dev);
8948 int err = 0;
8949
8950 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8951 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8952 mutex_lock(&priv->mutex);
8953
8954 if (wrqu->sens.fixed == 0)
8955 {
8956 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8957 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8958 goto out;
8959 }
8960 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8961 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8962 err = -EINVAL;
8963 goto out;
8964 }
8965
8966 priv->roaming_threshold = wrqu->sens.value;
8967 priv->disassociate_threshold = 3*wrqu->sens.value;
8968 out:
8969 mutex_unlock(&priv->mutex);
8970 return err;
8971 }
8972
8973 static int ipw_wx_get_sens(struct net_device *dev,
8974 struct iw_request_info *info,
8975 union iwreq_data *wrqu, char *extra)
8976 {
8977 struct ipw_priv *priv = ieee80211_priv(dev);
8978 mutex_lock(&priv->mutex);
8979 wrqu->sens.fixed = 1;
8980 wrqu->sens.value = priv->roaming_threshold;
8981 mutex_unlock(&priv->mutex);
8982
8983 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
8984 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8985
8986 return 0;
8987 }
8988
8989 static int ipw_wx_set_rate(struct net_device *dev,
8990 struct iw_request_info *info,
8991 union iwreq_data *wrqu, char *extra)
8992 {
8993 /* TODO: We should use semaphores or locks for access to priv */
8994 struct ipw_priv *priv = ieee80211_priv(dev);
8995 u32 target_rate = wrqu->bitrate.value;
8996 u32 fixed, mask;
8997
8998 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8999 /* value = X, fixed = 1 means only rate X */
9000 	/* value = X, fixed = 0 means all rates lower than or equal to X */
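	/* The chain below builds the rate mask accordingly: with fixed == 0
	 * every rate up to and including the target is ORed in; with
	 * fixed == 1 only the mask bit of the exact target rate is set. */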
9001
9002 if (target_rate == -1) {
9003 fixed = 0;
9004 mask = IEEE80211_DEFAULT_RATES_MASK;
9005 /* Now we should reassociate */
9006 goto apply;
9007 }
9008
9009 mask = 0;
9010 fixed = wrqu->bitrate.fixed;
9011
9012 if (target_rate == 1000000 || !fixed)
9013 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9014 if (target_rate == 1000000)
9015 goto apply;
9016
9017 if (target_rate == 2000000 || !fixed)
9018 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9019 if (target_rate == 2000000)
9020 goto apply;
9021
9022 if (target_rate == 5500000 || !fixed)
9023 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9024 if (target_rate == 5500000)
9025 goto apply;
9026
9027 if (target_rate == 6000000 || !fixed)
9028 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9029 if (target_rate == 6000000)
9030 goto apply;
9031
9032 if (target_rate == 9000000 || !fixed)
9033 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9034 if (target_rate == 9000000)
9035 goto apply;
9036
9037 if (target_rate == 11000000 || !fixed)
9038 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9039 if (target_rate == 11000000)
9040 goto apply;
9041
9042 if (target_rate == 12000000 || !fixed)
9043 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9044 if (target_rate == 12000000)
9045 goto apply;
9046
9047 if (target_rate == 18000000 || !fixed)
9048 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9049 if (target_rate == 18000000)
9050 goto apply;
9051
9052 if (target_rate == 24000000 || !fixed)
9053 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9054 if (target_rate == 24000000)
9055 goto apply;
9056
9057 if (target_rate == 36000000 || !fixed)
9058 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9059 if (target_rate == 36000000)
9060 goto apply;
9061
9062 if (target_rate == 48000000 || !fixed)
9063 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9064 if (target_rate == 48000000)
9065 goto apply;
9066
9067 if (target_rate == 54000000 || !fixed)
9068 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9069 if (target_rate == 54000000)
9070 goto apply;
9071
9072 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9073 return -EINVAL;
9074
9075 apply:
9076 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9077 mask, fixed ? "fixed" : "sub-rates");
9078 mutex_lock(&priv->mutex);
9079 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9080 priv->config &= ~CFG_FIXED_RATE;
9081 ipw_set_fixed_rate(priv, priv->ieee->mode);
9082 } else
9083 priv->config |= CFG_FIXED_RATE;
9084
9085 if (priv->rates_mask == mask) {
9086 IPW_DEBUG_WX("Mask set to current mask.\n");
9087 mutex_unlock(&priv->mutex);
9088 return 0;
9089 }
9090
9091 priv->rates_mask = mask;
9092
9093 /* Network configuration changed -- force [re]association */
9094 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9095 if (!ipw_disassociate(priv))
9096 ipw_associate(priv);
9097
9098 mutex_unlock(&priv->mutex);
9099 return 0;
9100 }
9101
9102 static int ipw_wx_get_rate(struct net_device *dev,
9103 struct iw_request_info *info,
9104 union iwreq_data *wrqu, char *extra)
9105 {
9106 struct ipw_priv *priv = ieee80211_priv(dev);
9107 mutex_lock(&priv->mutex);
9108 wrqu->bitrate.value = priv->last_rate;
9109 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9110 mutex_unlock(&priv->mutex);
9111 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9112 return 0;
9113 }
9114
9115 static int ipw_wx_set_rts(struct net_device *dev,
9116 struct iw_request_info *info,
9117 union iwreq_data *wrqu, char *extra)
9118 {
9119 struct ipw_priv *priv = ieee80211_priv(dev);
9120 mutex_lock(&priv->mutex);
9121 if (wrqu->rts.disabled)
9122 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9123 else {
9124 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9125 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9126 mutex_unlock(&priv->mutex);
9127 return -EINVAL;
9128 }
9129 priv->rts_threshold = wrqu->rts.value;
9130 }
9131
9132 ipw_send_rts_threshold(priv, priv->rts_threshold);
9133 mutex_unlock(&priv->mutex);
9134 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9135 return 0;
9136 }
9137
9138 static int ipw_wx_get_rts(struct net_device *dev,
9139 struct iw_request_info *info,
9140 union iwreq_data *wrqu, char *extra)
9141 {
9142 struct ipw_priv *priv = ieee80211_priv(dev);
9143 mutex_lock(&priv->mutex);
9144 wrqu->rts.value = priv->rts_threshold;
9145 wrqu->rts.fixed = 0; /* no auto select */
9146 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9147 mutex_unlock(&priv->mutex);
9148 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9149 return 0;
9150 }
9151
9152 static int ipw_wx_set_txpow(struct net_device *dev,
9153 struct iw_request_info *info,
9154 union iwreq_data *wrqu, char *extra)
9155 {
9156 struct ipw_priv *priv = ieee80211_priv(dev);
9157 int err = 0;
9158
9159 mutex_lock(&priv->mutex);
9160 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9161 err = -EINPROGRESS;
9162 goto out;
9163 }
9164
9165 if (!wrqu->power.fixed)
9166 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9167
9168 if (wrqu->power.flags != IW_TXPOW_DBM) {
9169 err = -EINVAL;
9170 goto out;
9171 }
9172
9173 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9174 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9175 err = -EINVAL;
9176 goto out;
9177 }
9178
9179 priv->tx_power = wrqu->power.value;
9180 err = ipw_set_tx_power(priv);
9181 out:
9182 mutex_unlock(&priv->mutex);
9183 return err;
9184 }
9185
9186 static int ipw_wx_get_txpow(struct net_device *dev,
9187 struct iw_request_info *info,
9188 union iwreq_data *wrqu, char *extra)
9189 {
9190 struct ipw_priv *priv = ieee80211_priv(dev);
9191 mutex_lock(&priv->mutex);
9192 wrqu->power.value = priv->tx_power;
9193 wrqu->power.fixed = 1;
9194 wrqu->power.flags = IW_TXPOW_DBM;
9195 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9196 mutex_unlock(&priv->mutex);
9197
9198 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9199 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9200
9201 return 0;
9202 }
9203
9204 static int ipw_wx_set_frag(struct net_device *dev,
9205 struct iw_request_info *info,
9206 union iwreq_data *wrqu, char *extra)
9207 {
9208 struct ipw_priv *priv = ieee80211_priv(dev);
9209 mutex_lock(&priv->mutex);
9210 if (wrqu->frag.disabled)
9211 priv->ieee->fts = DEFAULT_FTS;
9212 else {
9213 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9214 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9215 mutex_unlock(&priv->mutex);
9216 return -EINVAL;
9217 }
9218
9219 priv->ieee->fts = wrqu->frag.value & ~0x1;
9220 }
9221
9222 ipw_send_frag_threshold(priv, wrqu->frag.value);
9223 mutex_unlock(&priv->mutex);
9224 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9225 return 0;
9226 }
9227
9228 static int ipw_wx_get_frag(struct net_device *dev,
9229 struct iw_request_info *info,
9230 union iwreq_data *wrqu, char *extra)
9231 {
9232 struct ipw_priv *priv = ieee80211_priv(dev);
9233 mutex_lock(&priv->mutex);
9234 wrqu->frag.value = priv->ieee->fts;
9235 wrqu->frag.fixed = 0; /* no auto select */
9236 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9237 mutex_unlock(&priv->mutex);
9238 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9239
9240 return 0;
9241 }
9242
9243 static int ipw_wx_set_retry(struct net_device *dev,
9244 struct iw_request_info *info,
9245 union iwreq_data *wrqu, char *extra)
9246 {
9247 struct ipw_priv *priv = ieee80211_priv(dev);
9248
9249 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9250 return -EINVAL;
9251
9252 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9253 return 0;
9254
9255 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
9256 return -EINVAL;
9257
9258 mutex_lock(&priv->mutex);
9259 if (wrqu->retry.flags & IW_RETRY_MIN)
9260 priv->short_retry_limit = (u8) wrqu->retry.value;
9261 else if (wrqu->retry.flags & IW_RETRY_MAX)
9262 priv->long_retry_limit = (u8) wrqu->retry.value;
9263 else {
9264 priv->short_retry_limit = (u8) wrqu->retry.value;
9265 priv->long_retry_limit = (u8) wrqu->retry.value;
9266 }
9267
9268 ipw_send_retry_limit(priv, priv->short_retry_limit,
9269 priv->long_retry_limit);
9270 mutex_unlock(&priv->mutex);
9271 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9272 priv->short_retry_limit, priv->long_retry_limit);
9273 return 0;
9274 }
9275
9276 static int ipw_wx_get_retry(struct net_device *dev,
9277 struct iw_request_info *info,
9278 union iwreq_data *wrqu, char *extra)
9279 {
9280 struct ipw_priv *priv = ieee80211_priv(dev);
9281
9282 mutex_lock(&priv->mutex);
9283 wrqu->retry.disabled = 0;
9284
9285 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9286 mutex_unlock(&priv->mutex);
9287 return -EINVAL;
9288 }
9289
9290 if (wrqu->retry.flags & IW_RETRY_MAX) {
9291 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
9292 wrqu->retry.value = priv->long_retry_limit;
9293 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
9294 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
9295 wrqu->retry.value = priv->short_retry_limit;
9296 } else {
9297 wrqu->retry.flags = IW_RETRY_LIMIT;
9298 wrqu->retry.value = priv->short_retry_limit;
9299 }
9300 mutex_unlock(&priv->mutex);
9301
9302 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9303
9304 return 0;
9305 }
9306
9307 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9308 int essid_len)
9309 {
9310 struct ipw_scan_request_ext scan;
9311 int err = 0, scan_type;
9312
9313 if (!(priv->status & STATUS_INIT) ||
9314 (priv->status & STATUS_EXIT_PENDING))
9315 return 0;
9316
9317 mutex_lock(&priv->mutex);
9318
9319 if (priv->status & STATUS_RF_KILL_MASK) {
9320 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9321 priv->status |= STATUS_SCAN_PENDING;
9322 goto done;
9323 }
9324
9325 IPW_DEBUG_HC("starting request direct scan!\n");
9326
9327 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9328 /* We should not sleep here; otherwise we will block most
9329 * of the system (for instance, we hold rtnl_lock when we
9330 * get here).
9331 */
9332 err = -EAGAIN;
9333 goto done;
9334 }
9335 memset(&scan, 0, sizeof(scan));
9336
9337 if (priv->config & CFG_SPEED_SCAN)
9338 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9339 cpu_to_le16(30);
9340 else
9341 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9342 cpu_to_le16(20);
9343
9344 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9345 cpu_to_le16(20);
9346 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9347 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9348
9349 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9350
9351 err = ipw_send_ssid(priv, essid, essid_len);
9352 if (err) {
9353 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9354 goto done;
9355 }
9356 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9357
9358 ipw_add_scan_channels(priv, &scan, scan_type);
9359
9360 err = ipw_send_scan_request_ext(priv, &scan);
9361 if (err) {
9362 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9363 goto done;
9364 }
9365
9366 priv->status |= STATUS_SCANNING;
9367
9368 done:
9369 mutex_unlock(&priv->mutex);
9370 return err;
9371 }
9372
9373 static int ipw_wx_set_scan(struct net_device *dev,
9374 struct iw_request_info *info,
9375 union iwreq_data *wrqu, char *extra)
9376 {
9377 struct ipw_priv *priv = ieee80211_priv(dev);
9378 struct iw_scan_req *req = NULL;
9379 if (wrqu->data.length
9380 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9381 req = (struct iw_scan_req *)extra;
9382 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9383 ipw_request_direct_scan(priv, req->essid,
9384 req->essid_len);
9385 return 0;
9386 }
9387 }
9388
9389 IPW_DEBUG_WX("Start scan\n");
9390
9391 queue_work(priv->workqueue, &priv->request_scan);
9392
9393 return 0;
9394 }
9395
9396 static int ipw_wx_get_scan(struct net_device *dev,
9397 struct iw_request_info *info,
9398 union iwreq_data *wrqu, char *extra)
9399 {
9400 struct ipw_priv *priv = ieee80211_priv(dev);
9401 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9402 }
9403
9404 static int ipw_wx_set_encode(struct net_device *dev,
9405 struct iw_request_info *info,
9406 union iwreq_data *wrqu, char *key)
9407 {
9408 struct ipw_priv *priv = ieee80211_priv(dev);
9409 int ret;
9410 u32 cap = priv->capability;
9411
9412 mutex_lock(&priv->mutex);
9413 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9414
9415 /* In IBSS mode, we need to notify the firmware to update
9416 * the beacon info after we changed the capability. */
9417 if (cap != priv->capability &&
9418 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9419 priv->status & STATUS_ASSOCIATED)
9420 ipw_disassociate(priv);
9421
9422 mutex_unlock(&priv->mutex);
9423 return ret;
9424 }
9425
9426 static int ipw_wx_get_encode(struct net_device *dev,
9427 struct iw_request_info *info,
9428 union iwreq_data *wrqu, char *key)
9429 {
9430 struct ipw_priv *priv = ieee80211_priv(dev);
9431 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9432 }
9433
9434 static int ipw_wx_set_power(struct net_device *dev,
9435 struct iw_request_info *info,
9436 union iwreq_data *wrqu, char *extra)
9437 {
9438 struct ipw_priv *priv = ieee80211_priv(dev);
9439 int err;
9440 mutex_lock(&priv->mutex);
9441 if (wrqu->power.disabled) {
9442 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9443 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9444 if (err) {
9445 IPW_DEBUG_WX("failed setting power mode.\n");
9446 mutex_unlock(&priv->mutex);
9447 return err;
9448 }
9449 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9450 mutex_unlock(&priv->mutex);
9451 return 0;
9452 }
9453
9454 switch (wrqu->power.flags & IW_POWER_MODE) {
9455 case IW_POWER_ON: /* If not specified */
9456 case IW_POWER_MODE: /* If set all mask */
9457 case IW_POWER_ALL_R: /* If all explicitly stated */
9458 break;
9459 default: /* Otherwise we don't support it */
9460 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9461 wrqu->power.flags);
9462 mutex_unlock(&priv->mutex);
9463 return -EOPNOTSUPP;
9464 }
9465
9466 /* If the user hasn't specified a power management mode yet, default
9467 * to BATTERY */
9468 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9469 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9470 else
9471 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9472 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9473 if (err) {
9474 IPW_DEBUG_WX("failed setting power mode.\n");
9475 mutex_unlock(&priv->mutex);
9476 return err;
9477 }
9478
9479 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9480 mutex_unlock(&priv->mutex);
9481 return 0;
9482 }
9483
9484 static int ipw_wx_get_power(struct net_device *dev,
9485 struct iw_request_info *info,
9486 union iwreq_data *wrqu, char *extra)
9487 {
9488 struct ipw_priv *priv = ieee80211_priv(dev);
9489 mutex_lock(&priv->mutex);
9490 if (!(priv->power_mode & IPW_POWER_ENABLED))
9491 wrqu->power.disabled = 1;
9492 else
9493 wrqu->power.disabled = 0;
9494
9495 mutex_unlock(&priv->mutex);
9496 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9497
9498 return 0;
9499 }
9500
9501 static int ipw_wx_set_powermode(struct net_device *dev,
9502 struct iw_request_info *info,
9503 union iwreq_data *wrqu, char *extra)
9504 {
9505 struct ipw_priv *priv = ieee80211_priv(dev);
9506 int mode = *(int *)extra;
9507 int err;
9508 mutex_lock(&priv->mutex);
9509 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9510 mode = IPW_POWER_AC;
9511 priv->power_mode = mode;
9512 } else {
9513 priv->power_mode = IPW_POWER_ENABLED | mode;
9514 }
9515
9516 if (priv->power_mode != mode) {
9517 err = ipw_send_power_mode(priv, mode);
9518
9519 if (err) {
9520 IPW_DEBUG_WX("failed setting power mode.\n");
9521 mutex_unlock(&priv->mutex);
9522 return err;
9523 }
9524 }
9525 mutex_unlock(&priv->mutex);
9526 return 0;
9527 }
9528
9529 #define MAX_WX_STRING 80
9530 static int ipw_wx_get_powermode(struct net_device *dev,
9531 struct iw_request_info *info,
9532 union iwreq_data *wrqu, char *extra)
9533 {
9534 struct ipw_priv *priv = ieee80211_priv(dev);
9535 int level = IPW_POWER_LEVEL(priv->power_mode);
9536 char *p = extra;
9537
9538 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9539
9540 switch (level) {
9541 case IPW_POWER_AC:
9542 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9543 break;
9544 case IPW_POWER_BATTERY:
9545 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9546 break;
9547 default:
9548 p += snprintf(p, MAX_WX_STRING - (p - extra),
9549 "(Timeout %dms, Period %dms)",
9550 timeout_duration[level - 1] / 1000,
9551 period_duration[level - 1] / 1000);
9552 }
9553
9554 if (!(priv->power_mode & IPW_POWER_ENABLED))
9555 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9556
9557 wrqu->data.length = p - extra + 1;
9558
9559 return 0;
9560 }
9561
9562 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9563 struct iw_request_info *info,
9564 union iwreq_data *wrqu, char *extra)
9565 {
9566 struct ipw_priv *priv = ieee80211_priv(dev);
9567 int mode = *(int *)extra;
9568 u8 band = 0, modulation = 0;
9569
9570 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9571 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9572 return -EINVAL;
9573 }
9574 mutex_lock(&priv->mutex);
9575 if (priv->adapter == IPW_2915ABG) {
9576 priv->ieee->abg_true = 1;
9577 if (mode & IEEE_A) {
9578 band |= IEEE80211_52GHZ_BAND;
9579 modulation |= IEEE80211_OFDM_MODULATION;
9580 } else
9581 priv->ieee->abg_true = 0;
9582 } else {
9583 if (mode & IEEE_A) {
9584 IPW_WARNING("Attempt to set 2200BG into "
9585 "802.11a mode\n");
9586 mutex_unlock(&priv->mutex);
9587 return -EINVAL;
9588 }
9589
9590 priv->ieee->abg_true = 0;
9591 }
9592
9593 if (mode & IEEE_B) {
9594 band |= IEEE80211_24GHZ_BAND;
9595 modulation |= IEEE80211_CCK_MODULATION;
9596 } else
9597 priv->ieee->abg_true = 0;
9598
9599 if (mode & IEEE_G) {
9600 band |= IEEE80211_24GHZ_BAND;
9601 modulation |= IEEE80211_OFDM_MODULATION;
9602 } else
9603 priv->ieee->abg_true = 0;
9604
9605 priv->ieee->mode = mode;
9606 priv->ieee->freq_band = band;
9607 priv->ieee->modulation = modulation;
9608 init_supported_rates(priv, &priv->rates);
9609
9610 /* Network configuration changed -- force [re]association */
9611 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9612 if (!ipw_disassociate(priv)) {
9613 ipw_send_supported_rates(priv, &priv->rates);
9614 ipw_associate(priv);
9615 }
9616
9617 /* Update the band LEDs */
9618 ipw_led_band_on(priv);
9619
9620 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9621 mode & IEEE_A ? 'a' : '.',
9622 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9623 mutex_unlock(&priv->mutex);
9624 return 0;
9625 }
9626
9627 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9628 struct iw_request_info *info,
9629 union iwreq_data *wrqu, char *extra)
9630 {
9631 struct ipw_priv *priv = ieee80211_priv(dev);
9632 mutex_lock(&priv->mutex);
9633 switch (priv->ieee->mode) {
9634 case IEEE_A:
9635 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9636 break;
9637 case IEEE_B:
9638 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9639 break;
9640 case IEEE_A | IEEE_B:
9641 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9642 break;
9643 case IEEE_G:
9644 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9645 break;
9646 case IEEE_A | IEEE_G:
9647 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9648 break;
9649 case IEEE_B | IEEE_G:
9650 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9651 break;
9652 case IEEE_A | IEEE_B | IEEE_G:
9653 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9654 break;
9655 default:
9656 strncpy(extra, "unknown", MAX_WX_STRING);
9657 break;
9658 }
9659
9660 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9661
9662 wrqu->data.length = strlen(extra) + 1;
9663 mutex_unlock(&priv->mutex);
9664
9665 return 0;
9666 }
9667
9668 static int ipw_wx_set_preamble(struct net_device *dev,
9669 struct iw_request_info *info,
9670 union iwreq_data *wrqu, char *extra)
9671 {
9672 struct ipw_priv *priv = ieee80211_priv(dev);
9673 int mode = *(int *)extra;
9674 mutex_lock(&priv->mutex);
9675 /* Switching from SHORT -> LONG requires a disassociation */
9676 if (mode == 1) {
9677 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9678 priv->config |= CFG_PREAMBLE_LONG;
9679
9680 /* Network configuration changed -- force [re]association */
9681 IPW_DEBUG_ASSOC
9682 ("[re]association triggered due to preamble change.\n");
9683 if (!ipw_disassociate(priv))
9684 ipw_associate(priv);
9685 }
9686 goto done;
9687 }
9688
9689 if (mode == 0) {
9690 priv->config &= ~CFG_PREAMBLE_LONG;
9691 goto done;
9692 }
9693 mutex_unlock(&priv->mutex);
9694 return -EINVAL;
9695
9696 done:
9697 mutex_unlock(&priv->mutex);
9698 return 0;
9699 }
9700
9701 static int ipw_wx_get_preamble(struct net_device *dev,
9702 struct iw_request_info *info,
9703 union iwreq_data *wrqu, char *extra)
9704 {
9705 struct ipw_priv *priv = ieee80211_priv(dev);
9706 mutex_lock(&priv->mutex);
9707 if (priv->config & CFG_PREAMBLE_LONG)
9708 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9709 else
9710 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9711 mutex_unlock(&priv->mutex);
9712 return 0;
9713 }
9714
9715 #ifdef CONFIG_IPW2200_MONITOR
9716 static int ipw_wx_set_monitor(struct net_device *dev,
9717 struct iw_request_info *info,
9718 union iwreq_data *wrqu, char *extra)
9719 {
9720 struct ipw_priv *priv = ieee80211_priv(dev);
9721 int *parms = (int *)extra;
9722 int enable = (parms[0] > 0);
9723 mutex_lock(&priv->mutex);
9724 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9725 if (enable) {
9726 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9727 #ifdef CONFIG_IPW2200_RADIOTAP
9728 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9729 #else
9730 priv->net_dev->type = ARPHRD_IEEE80211;
9731 #endif
9732 queue_work(priv->workqueue, &priv->adapter_restart);
9733 }
9734
9735 ipw_set_channel(priv, parms[1]);
9736 } else {
9737 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9738 mutex_unlock(&priv->mutex);
9739 return 0;
9740 }
9741 priv->net_dev->type = ARPHRD_ETHER;
9742 queue_work(priv->workqueue, &priv->adapter_restart);
9743 }
9744 mutex_unlock(&priv->mutex);
9745 return 0;
9746 }
9747
9748 #endif /* CONFIG_IPW2200_MONITOR */
9749
9750 static int ipw_wx_reset(struct net_device *dev,
9751 struct iw_request_info *info,
9752 union iwreq_data *wrqu, char *extra)
9753 {
9754 struct ipw_priv *priv = ieee80211_priv(dev);
9755 IPW_DEBUG_WX("RESET\n");
9756 queue_work(priv->workqueue, &priv->adapter_restart);
9757 return 0;
9758 }
9759
9760 static int ipw_wx_sw_reset(struct net_device *dev,
9761 struct iw_request_info *info,
9762 union iwreq_data *wrqu, char *extra)
9763 {
9764 struct ipw_priv *priv = ieee80211_priv(dev);
9765 union iwreq_data wrqu_sec = {
9766 .encoding = {
9767 .flags = IW_ENCODE_DISABLED,
9768 },
9769 };
9770 int ret;
9771
9772 IPW_DEBUG_WX("SW_RESET\n");
9773
9774 mutex_lock(&priv->mutex);
9775
9776 ret = ipw_sw_reset(priv, 2);
9777 if (!ret) {
9778 free_firmware();
9779 ipw_adapter_restart(priv);
9780 }
9781
9782 /* The SW reset bit might have been toggled on by the 'disable'
9783 * module parameter, so take appropriate action */
9784 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9785
9786 mutex_unlock(&priv->mutex);
9787 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9788 mutex_lock(&priv->mutex);
9789
9790 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9791 /* Configuration likely changed -- force [re]association */
9792 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9793 "reset.\n");
9794 if (!ipw_disassociate(priv))
9795 ipw_associate(priv);
9796 }
9797
9798 mutex_unlock(&priv->mutex);
9799
9800 return 0;
9801 }
9802
9803 /* Rebase the WE IOCTLs to zero for the handler array */
9804 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
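/*
 * For illustration: IW_IOCTL(SIOCGIWNAME) expands to
 * [SIOCGIWNAME - SIOCSIWCOMMIT], i.e. designated initializer index 1,
 * since SIOCGIWNAME == SIOCSIWCOMMIT + 1.  The wireless extensions core
 * applies the same offset when it dispatches a standard ioctl into this
 * table.
 */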
9805 static iw_handler ipw_wx_handlers[] = {
9806 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9807 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9808 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9809 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9810 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9811 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9812 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9813 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9814 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9815 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9816 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9817 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9818 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9819 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9820 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9821 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9822 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9823 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9824 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9825 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9826 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9827 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9828 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9829 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9830 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9831 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9832 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9833 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9834 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9835 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9836 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9837 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9838 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9839 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9840 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9841 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9842 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9843 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9844 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9845 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9846 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9847 };
9848
9849 enum {
9850 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9851 IPW_PRIV_GET_POWER,
9852 IPW_PRIV_SET_MODE,
9853 IPW_PRIV_GET_MODE,
9854 IPW_PRIV_SET_PREAMBLE,
9855 IPW_PRIV_GET_PREAMBLE,
9856 IPW_PRIV_RESET,
9857 IPW_PRIV_SW_RESET,
9858 #ifdef CONFIG_IPW2200_MONITOR
9859 IPW_PRIV_SET_MONITOR,
9860 #endif
9861 };
9862
9863 static struct iw_priv_args ipw_priv_args[] = {
9864 {
9865 .cmd = IPW_PRIV_SET_POWER,
9866 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9867 .name = "set_power"},
9868 {
9869 .cmd = IPW_PRIV_GET_POWER,
9870 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9871 .name = "get_power"},
9872 {
9873 .cmd = IPW_PRIV_SET_MODE,
9874 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9875 .name = "set_mode"},
9876 {
9877 .cmd = IPW_PRIV_GET_MODE,
9878 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9879 .name = "get_mode"},
9880 {
9881 .cmd = IPW_PRIV_SET_PREAMBLE,
9882 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9883 .name = "set_preamble"},
9884 {
9885 .cmd = IPW_PRIV_GET_PREAMBLE,
9886 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9887 .name = "get_preamble"},
9888 {
9889 IPW_PRIV_RESET,
9890 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9891 {
9892 IPW_PRIV_SW_RESET,
9893 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9894 #ifdef CONFIG_IPW2200_MONITOR
9895 {
9896 IPW_PRIV_SET_MONITOR,
9897 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9898 #endif /* CONFIG_IPW2200_MONITOR */
9899 };
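/*
 * Example user-space usage (illustrative; assumes the wireless-tools iwpriv
 * utility and an interface named eth1 -- see README.ipw2200 for the
 * documented commands):
 *
 *   iwpriv eth1 set_power 1     # select power save level 1
 *   iwpriv eth1 get_mode        # report the current 802.11 band mask
 *   iwpriv eth1 monitor 1 6     # enter monitor mode on channel 6 (if built in)
 *   iwpriv eth1 monitor 0 0     # leave monitor mode
 */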
9900
9901 static iw_handler ipw_priv_handler[] = {
9902 ipw_wx_set_powermode,
9903 ipw_wx_get_powermode,
9904 ipw_wx_set_wireless_mode,
9905 ipw_wx_get_wireless_mode,
9906 ipw_wx_set_preamble,
9907 ipw_wx_get_preamble,
9908 ipw_wx_reset,
9909 ipw_wx_sw_reset,
9910 #ifdef CONFIG_IPW2200_MONITOR
9911 ipw_wx_set_monitor,
9912 #endif
9913 };
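/*
 * Note: private handlers are dispatched by (cmd - SIOCIWFIRSTPRIV), so the
 * order of ipw_priv_handler[] must match the IPW_PRIV_* enum above;
 * IPW_PRIV_SET_POWER (SIOCIWFIRSTPRIV + 0), for example, lands on
 * ipw_wx_set_powermode.
 */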
9914
9915 static struct iw_handler_def ipw_wx_handler_def = {
9916 .standard = ipw_wx_handlers,
9917 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9918 .num_private = ARRAY_SIZE(ipw_priv_handler),
9919 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9920 .private = ipw_priv_handler,
9921 .private_args = ipw_priv_args,
9922 .get_wireless_stats = ipw_get_wireless_stats,
9923 };
9924
9925 /*
9926 * Get wireless statistics.
9927 * Called by /proc/net/wireless
9928 * Also called by SIOCGIWSTATS
9929 */
9930 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9931 {
9932 struct ipw_priv *priv = ieee80211_priv(dev);
9933 struct iw_statistics *wstats;
9934
9935 wstats = &priv->wstats;
9936
9937 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9938 * netdev->get_wireless_stats seems to be called before fw is
9939 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9940 * and associated; if not associated, the values are all meaningless
9941 * anyway, so set them all to 0 and INVALID */
9942 if (!(priv->status & STATUS_ASSOCIATED)) {
9943 wstats->miss.beacon = 0;
9944 wstats->discard.retries = 0;
9945 wstats->qual.qual = 0;
9946 wstats->qual.level = 0;
9947 wstats->qual.noise = 0;
9948 wstats->qual.updated = 7;
9949 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9950 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9951 return wstats;
9952 }
9953
9954 wstats->qual.qual = priv->quality;
9955 wstats->qual.level = priv->exp_avg_rssi;
9956 wstats->qual.noise = priv->exp_avg_noise;
9957 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9958 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9959
9960 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9961 wstats->discard.retries = priv->last_tx_failures;
9962 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9963
9964 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9965 goto fail_get_ordinal;
9966 wstats->discard.retries += tx_retry; */
9967
9968 return wstats;
9969 }
9970
9971 /* net device stuff */
9972
9973 static void init_sys_config(struct ipw_sys_config *sys_config)
9974 {
9975 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9976 sys_config->bt_coexistence = 0;
9977 sys_config->answer_broadcast_ssid_probe = 0;
9978 sys_config->accept_all_data_frames = 0;
9979 sys_config->accept_non_directed_frames = 1;
9980 sys_config->exclude_unicast_unencrypted = 0;
9981 sys_config->disable_unicast_decryption = 1;
9982 sys_config->exclude_multicast_unencrypted = 0;
9983 sys_config->disable_multicast_decryption = 1;
9984 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
9985 antenna = CFG_SYS_ANTENNA_BOTH;
9986 sys_config->antenna_diversity = antenna;
9987 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9988 sys_config->dot11g_auto_detection = 0;
9989 sys_config->enable_cts_to_self = 0;
9990 sys_config->bt_coexist_collision_thr = 0;
9991 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
9992 sys_config->silence_threshold = 0x1e;
9993 }
9994
9995 static int ipw_net_open(struct net_device *dev)
9996 {
9997 struct ipw_priv *priv = ieee80211_priv(dev);
9998 IPW_DEBUG_INFO("dev->open\n");
9999 /* we should be verifying the device is ready to be opened */
10000 mutex_lock(&priv->mutex);
10001 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10002 (priv->status & STATUS_ASSOCIATED))
10003 netif_start_queue(dev);
10004 mutex_unlock(&priv->mutex);
10005 return 0;
10006 }
10007
10008 static int ipw_net_stop(struct net_device *dev)
10009 {
10010 IPW_DEBUG_INFO("dev->close\n");
10011 netif_stop_queue(dev);
10012 return 0;
10013 }
10014
10015 /*
10016 todo:
10017
10018 Modify to send one TFD per fragment instead of using chunking; otherwise
10019 we would need to heavily modify ieee80211_skb_to_txb().
10020 */
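/*
 * As implemented below: up to NUM_TFD_CHUNKS - 2 fragments are DMA-mapped
 * directly into the TFD; any remaining fragments are copied into a single
 * freshly allocated skb and sent as one extra chunk.
 */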
10021
10022 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10023 int pri)
10024 {
10025 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10026 txb->fragments[0]->data;
10027 int i = 0;
10028 struct tfd_frame *tfd;
10029 #ifdef CONFIG_IPW2200_QOS
10030 int tx_id = ipw_get_tx_queue_number(priv, pri);
10031 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10032 #else
10033 struct clx2_tx_queue *txq = &priv->txq[0];
10034 #endif
10035 struct clx2_queue *q = &txq->q;
10036 u8 id, hdr_len, unicast;
10037 u16 remaining_bytes;
10038 int fc;
10039
10040 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10041 switch (priv->ieee->iw_mode) {
10042 case IW_MODE_ADHOC:
10043 unicast = !is_multicast_ether_addr(hdr->addr1);
10044 id = ipw_find_station(priv, hdr->addr1);
10045 if (id == IPW_INVALID_STATION) {
10046 id = ipw_add_station(priv, hdr->addr1);
10047 if (id == IPW_INVALID_STATION) {
10048 IPW_WARNING("Attempt to send data to "
10049 "invalid cell: " MAC_FMT "\n",
10050 MAC_ARG(hdr->addr1));
10051 goto drop;
10052 }
10053 }
10054 break;
10055
10056 case IW_MODE_INFRA:
10057 default:
10058 unicast = !is_multicast_ether_addr(hdr->addr3);
10059 id = 0;
10060 break;
10061 }
10062
10063 tfd = &txq->bd[q->first_empty];
10064 txq->txb[q->first_empty] = txb;
10065 memset(tfd, 0, sizeof(*tfd));
10066 tfd->u.data.station_number = id;
10067
10068 tfd->control_flags.message_type = TX_FRAME_TYPE;
10069 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10070
10071 tfd->u.data.cmd_id = DINO_CMD_TX;
10072 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10073 remaining_bytes = txb->payload_size;
10074
10075 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10076 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10077 else
10078 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10079
10080 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10081 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10082
10083 fc = le16_to_cpu(hdr->frame_ctl);
10084 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10085
10086 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10087
10088 if (likely(unicast))
10089 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10090
10091 if (txb->encrypted && !priv->ieee->host_encrypt) {
10092 switch (priv->ieee->sec.level) {
10093 case SEC_LEVEL_3:
10094 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10095 IEEE80211_FCTL_PROTECTED;
10096 /* XXX: ACK flag must be set for CCMP even if it
10097 * is a multicast/broadcast packet, because CCMP
10098 * group communication encrypted by GTK is
10099 * actually done by the AP. */
10100 if (!unicast)
10101 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10102
10103 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10104 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10105 tfd->u.data.key_index = 0;
10106 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10107 break;
10108 case SEC_LEVEL_2:
10109 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10110 IEEE80211_FCTL_PROTECTED;
10111 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10112 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10113 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10114 break;
10115 case SEC_LEVEL_1:
10116 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10117 IEEE80211_FCTL_PROTECTED;
10118 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10119 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10120 40)
10121 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10122 else
10123 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10124 break;
10125 case SEC_LEVEL_0:
10126 break;
10127 default:
10128 printk(KERN_ERR "Unknown security level %d\n",
10129 priv->ieee->sec.level);
10130 break;
10131 }
10132 } else
10133 /* No hardware encryption */
10134 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10135
10136 #ifdef CONFIG_IPW2200_QOS
10137 if (fc & IEEE80211_STYPE_QOS_DATA)
10138 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10139 #endif /* CONFIG_IPW2200_QOS */
10140
10141 /* payload */
10142 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10143 txb->nr_frags));
10144 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10145 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10146 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10147 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10148 i, le32_to_cpu(tfd->u.data.num_chunks),
10149 txb->fragments[i]->len - hdr_len);
10150 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10151 i, tfd->u.data.num_chunks,
10152 txb->fragments[i]->len - hdr_len);
10153 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10154 txb->fragments[i]->len - hdr_len);
10155
10156 tfd->u.data.chunk_ptr[i] =
10157 cpu_to_le32(pci_map_single
10158 (priv->pci_dev,
10159 txb->fragments[i]->data + hdr_len,
10160 txb->fragments[i]->len - hdr_len,
10161 PCI_DMA_TODEVICE));
10162 tfd->u.data.chunk_len[i] =
10163 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10164 }
10165
10166 if (i != txb->nr_frags) {
10167 struct sk_buff *skb;
10168 u16 remaining_bytes = 0;
10169 int j;
10170
10171 for (j = i; j < txb->nr_frags; j++)
10172 remaining_bytes += txb->fragments[j]->len - hdr_len;
10173
10174 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10175 remaining_bytes);
10176 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10177 if (skb != NULL) {
10178 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10179 for (j = i; j < txb->nr_frags; j++) {
10180 int size = txb->fragments[j]->len - hdr_len;
10181
10182 printk(KERN_INFO "Adding frag %d %d...\n",
10183 j, size);
10184 memcpy(skb_put(skb, size),
10185 txb->fragments[j]->data + hdr_len, size);
10186 }
10187 dev_kfree_skb_any(txb->fragments[i]);
10188 txb->fragments[i] = skb;
10189 tfd->u.data.chunk_ptr[i] =
10190 cpu_to_le32(pci_map_single
10191 (priv->pci_dev, skb->data,
10192 tfd->u.data.chunk_len[i],
10193 PCI_DMA_TODEVICE));
10194
10195 tfd->u.data.num_chunks =
10196 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10197 1);
10198 }
10199 }
10200
10201 /* kick DMA */
10202 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10203 ipw_write32(priv, q->reg_w, q->first_empty);
10204
10205 if (ipw_queue_space(q) < q->high_mark)
10206 netif_stop_queue(priv->net_dev);
10207
10208 return NETDEV_TX_OK;
10209
10210 drop:
10211 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10212 ieee80211_txb_free(txb);
10213 return NETDEV_TX_OK;
10214 }
10215
10216 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10217 {
10218 struct ipw_priv *priv = ieee80211_priv(dev);
10219 #ifdef CONFIG_IPW2200_QOS
10220 int tx_id = ipw_get_tx_queue_number(priv, pri);
10221 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10222 #else
10223 struct clx2_tx_queue *txq = &priv->txq[0];
10224 #endif /* CONFIG_IPW2200_QOS */
10225
10226 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10227 return 1;
10228
10229 return 0;
10230 }
10231
10232 #ifdef CONFIG_IPW2200_PROMISCUOUS
10233 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10234 struct ieee80211_txb *txb)
10235 {
10236 struct ieee80211_rx_stats dummystats;
10237 struct ieee80211_hdr *hdr;
10238 u8 n;
10239 u16 filter = priv->prom_priv->filter;
10240 int hdr_only = 0;
10241
10242 if (filter & IPW_PROM_NO_TX)
10243 return;
10244
10245 memset(&dummystats, 0, sizeof(dummystats));
10246
10247 /* Filtering of fragment chains is done against the first fragment */
10248 hdr = (void *)txb->fragments[0]->data;
10249 if (ieee80211_is_management(hdr->frame_ctl)) {
10250 if (filter & IPW_PROM_NO_MGMT)
10251 return;
10252 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10253 hdr_only = 1;
10254 } else if (ieee80211_is_control(hdr->frame_ctl)) {
10255 if (filter & IPW_PROM_NO_CTL)
10256 return;
10257 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10258 hdr_only = 1;
10259 } else if (ieee80211_is_data(hdr->frame_ctl)) {
10260 if (filter & IPW_PROM_NO_DATA)
10261 return;
10262 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10263 hdr_only = 1;
10264 }
10265
10266 for(n=0; n<txb->nr_frags; ++n) {
10267 struct sk_buff *src = txb->fragments[n];
10268 struct sk_buff *dst;
10269 struct ieee80211_radiotap_header *rt_hdr;
10270 int len;
10271
10272 if (hdr_only) {
10273 hdr = (void *)src->data;
10274 len = ieee80211_get_hdrlen(hdr->frame_ctl);
10275 } else
10276 len = src->len;
10277
10278 dst = alloc_skb(
10279 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10280 if (!dst) continue;
10281
10282 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10283
10284 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10285 rt_hdr->it_pad = 0;
10286 rt_hdr->it_present = 0; /* after all, it's just an idea */
10287 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10288
10289 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10290 ieee80211chan2mhz(priv->channel));
10291 if (priv->channel > 14) /* 802.11a */
10292 *(u16*)skb_put(dst, sizeof(u16)) =
10293 cpu_to_le16(IEEE80211_CHAN_OFDM |
10294 IEEE80211_CHAN_5GHZ);
10295 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10296 *(u16*)skb_put(dst, sizeof(u16)) =
10297 cpu_to_le16(IEEE80211_CHAN_CCK |
10298 IEEE80211_CHAN_2GHZ);
10299 else /* 802.11g */
10300 *(u16*)skb_put(dst, sizeof(u16)) =
10301 cpu_to_le16(IEEE80211_CHAN_OFDM |
10302 IEEE80211_CHAN_2GHZ);
10303
10304 rt_hdr->it_len = dst->len;
10305
10306 memcpy(skb_put(dst, len), src->data, len);
10307
10308 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10309 dev_kfree_skb_any(dst);
10310 }
10311 }
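/*
 * Note: the radiotap header built above advertises only the channel field
 * (a 16-bit frequency in MHz followed by 16 bits of channel flags), which
 * is enough for sniffers on the rtap interface to tag the band a frame was
 * transmitted on.
 */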
10312 #endif
10313
10314 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10315 struct net_device *dev, int pri)
10316 {
10317 struct ipw_priv *priv = ieee80211_priv(dev);
10318 unsigned long flags;
10319 int ret;
10320
10321 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10322 spin_lock_irqsave(&priv->lock, flags);
10323
10324 if (!(priv->status & STATUS_ASSOCIATED)) {
10325 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10326 priv->ieee->stats.tx_carrier_errors++;
10327 netif_stop_queue(dev);
10328 goto fail_unlock;
10329 }
10330
10331 #ifdef CONFIG_IPW2200_PROMISCUOUS
10332 if (rtap_iface && netif_running(priv->prom_net_dev))
10333 ipw_handle_promiscuous_tx(priv, txb);
10334 #endif
10335
10336 ret = ipw_tx_skb(priv, txb, pri);
10337 if (ret == NETDEV_TX_OK)
10338 __ipw_led_activity_on(priv);
10339 spin_unlock_irqrestore(&priv->lock, flags);
10340
10341 return ret;
10342
10343 fail_unlock:
10344 spin_unlock_irqrestore(&priv->lock, flags);
10345 return 1;
10346 }
10347
10348 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10349 {
10350 struct ipw_priv *priv = ieee80211_priv(dev);
10351
10352 priv->ieee->stats.tx_packets = priv->tx_packets;
10353 priv->ieee->stats.rx_packets = priv->rx_packets;
10354 return &priv->ieee->stats;
10355 }
10356
10357 static void ipw_net_set_multicast_list(struct net_device *dev)
10358 {
10359
10360 }
10361
10362 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10363 {
10364 struct ipw_priv *priv = ieee80211_priv(dev);
10365 struct sockaddr *addr = p;
10366 if (!is_valid_ether_addr(addr->sa_data))
10367 return -EADDRNOTAVAIL;
10368 mutex_lock(&priv->mutex);
10369 priv->config |= CFG_CUSTOM_MAC;
10370 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10371 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
10372 priv->net_dev->name, MAC_ARG(priv->mac_addr));
10373 queue_work(priv->workqueue, &priv->adapter_restart);
10374 mutex_unlock(&priv->mutex);
10375 return 0;
10376 }
10377
10378 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10379 struct ethtool_drvinfo *info)
10380 {
10381 struct ipw_priv *p = ieee80211_priv(dev);
10382 char vers[64];
10383 char date[32];
10384 u32 len;
10385
10386 strcpy(info->driver, DRV_NAME);
10387 strcpy(info->version, DRV_VERSION);
10388
10389 len = sizeof(vers);
10390 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10391 len = sizeof(date);
10392 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10393
10394 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10395 vers, date);
10396 strcpy(info->bus_info, pci_name(p->pci_dev));
10397 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10398 }
10399
10400 static u32 ipw_ethtool_get_link(struct net_device *dev)
10401 {
10402 struct ipw_priv *priv = ieee80211_priv(dev);
10403 return (priv->status & STATUS_ASSOCIATED) != 0;
10404 }
10405
10406 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10407 {
10408 return IPW_EEPROM_IMAGE_SIZE;
10409 }
10410
10411 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10412 struct ethtool_eeprom *eeprom, u8 * bytes)
10413 {
10414 struct ipw_priv *p = ieee80211_priv(dev);
10415
10416 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10417 return -EINVAL;
10418 mutex_lock(&p->mutex);
10419 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10420 mutex_unlock(&p->mutex);
10421 return 0;
10422 }
10423
10424 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10425 struct ethtool_eeprom *eeprom, u8 * bytes)
10426 {
10427 struct ipw_priv *p = ieee80211_priv(dev);
10428 int i;
10429
10430 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10431 return -EINVAL;
10432 mutex_lock(&p->mutex);
10433 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10434 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10435 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10436 mutex_unlock(&p->mutex);
10437 return 0;
10438 }
10439
10440 static struct ethtool_ops ipw_ethtool_ops = {
10441 .get_link = ipw_ethtool_get_link,
10442 .get_drvinfo = ipw_ethtool_get_drvinfo,
10443 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10444 .get_eeprom = ipw_ethtool_get_eeprom,
10445 .set_eeprom = ipw_ethtool_set_eeprom,
10446 };
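/*
 * Example user-space usage (illustrative; assumes the ethtool utility and an
 * interface named eth1): the hooks above back `ethtool -i eth1`, which
 * reports the firmware version string built in ipw_ethtool_get_drvinfo(),
 * and `ethtool -e eth1`, which dumps the IPW_EEPROM_IMAGE_SIZE byte EEPROM
 * image cached in priv->eeprom.
 */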
10447
10448 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
10449 {
10450 struct ipw_priv *priv = data;
10451 u32 inta, inta_mask;
10452
10453 if (!priv)
10454 return IRQ_NONE;
10455
10456 spin_lock(&priv->irq_lock);
10457
10458 if (!(priv->status & STATUS_INT_ENABLED)) {
10459 /* Shared IRQ */
10460 goto none;
10461 }
10462
10463 inta = ipw_read32(priv, IPW_INTA_RW);
10464 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10465
10466 if (inta == 0xFFFFFFFF) {
10467 /* Hardware disappeared */
10468 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10469 goto none;
10470 }
10471
10472 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10473 /* Shared interrupt */
10474 goto none;
10475 }
10476
10477 /* tell the device to stop sending interrupts */
10478 __ipw_disable_interrupts(priv);
10479
10480 /* ack current interrupts */
10481 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10482 ipw_write32(priv, IPW_INTA_RW, inta);
10483
10484 /* Cache INTA value for our tasklet */
10485 priv->isr_inta = inta;
10486
10487 tasklet_schedule(&priv->irq_tasklet);
10488
10489 spin_unlock(&priv->irq_lock);
10490
10491 return IRQ_HANDLED;
10492 none:
10493 spin_unlock(&priv->irq_lock);
10494 return IRQ_NONE;
10495 }
10496
10497 static void ipw_rf_kill(void *adapter)
10498 {
10499 struct ipw_priv *priv = adapter;
10500 unsigned long flags;
10501
10502 spin_lock_irqsave(&priv->lock, flags);
10503
10504 if (rf_kill_active(priv)) {
10505 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10506 if (priv->workqueue)
10507 queue_delayed_work(priv->workqueue,
10508 &priv->rf_kill, 2 * HZ);
10509 goto exit_unlock;
10510 }
10511
10512 /* RF Kill is now disabled, so bring the device back up */
10513
10514 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10515 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10516 "device\n");
10517
10518 /* we cannot do an adapter restart while inside an irq lock */
10519 queue_work(priv->workqueue, &priv->adapter_restart);
10520 } else
10521 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10522 "enabled\n");
10523
10524 exit_unlock:
10525 spin_unlock_irqrestore(&priv->lock, flags);
10526 }
10527
10528 static void ipw_bg_rf_kill(void *data)
10529 {
10530 struct ipw_priv *priv = data;
10531 mutex_lock(&priv->mutex);
10532 ipw_rf_kill(data);
10533 mutex_unlock(&priv->mutex);
10534 }
10535
10536 static void ipw_link_up(struct ipw_priv *priv)
10537 {
10538 priv->last_seq_num = -1;
10539 priv->last_frag_num = -1;
10540 priv->last_packet_time = 0;
10541
10542 netif_carrier_on(priv->net_dev);
10543 if (netif_queue_stopped(priv->net_dev)) {
10544 IPW_DEBUG_NOTIF("waking queue\n");
10545 netif_wake_queue(priv->net_dev);
10546 } else {
10547 IPW_DEBUG_NOTIF("starting queue\n");
10548 netif_start_queue(priv->net_dev);
10549 }
10550
10551 cancel_delayed_work(&priv->request_scan);
10552 ipw_reset_stats(priv);
10553 /* Ensure the rate is updated immediately */
10554 priv->last_rate = ipw_get_current_rate(priv);
10555 ipw_gather_stats(priv);
10556 ipw_led_link_up(priv);
10557 notify_wx_assoc_event(priv);
10558
10559 if (priv->config & CFG_BACKGROUND_SCAN)
10560 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10561 }
10562
10563 static void ipw_bg_link_up(void *data)
10564 {
10565 struct ipw_priv *priv = data;
10566 mutex_lock(&priv->mutex);
10567 ipw_link_up(data);
10568 mutex_unlock(&priv->mutex);
10569 }
10570
10571 static void ipw_link_down(struct ipw_priv *priv)
10572 {
10573 ipw_led_link_down(priv);
10574 netif_carrier_off(priv->net_dev);
10575 netif_stop_queue(priv->net_dev);
10576 notify_wx_assoc_event(priv);
10577
10578 /* Cancel any queued work ... */
10579 cancel_delayed_work(&priv->request_scan);
10580 cancel_delayed_work(&priv->adhoc_check);
10581 cancel_delayed_work(&priv->gather_stats);
10582
10583 ipw_reset_stats(priv);
10584
10585 if (!(priv->status & STATUS_EXIT_PENDING)) {
10586 /* Queue up another scan... */
10587 queue_work(priv->workqueue, &priv->request_scan);
10588 }
10589 }
10590
10591 static void ipw_bg_link_down(void *data)
10592 {
10593 struct ipw_priv *priv = data;
10594 mutex_lock(&priv->mutex);
10595 ipw_link_down(data);
10596 mutex_unlock(&priv->mutex);
10597 }
10598
10599 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10600 {
10601 int ret = 0;
10602
10603 priv->workqueue = create_workqueue(DRV_NAME);
10604 init_waitqueue_head(&priv->wait_command_queue);
10605 init_waitqueue_head(&priv->wait_state);
10606
10607 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10608 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10609 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10610 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10611 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10612 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10613 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10614 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10615 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10616 INIT_WORK(&priv->request_scan,
10617 (void (*)(void *))ipw_request_scan, priv);
10618 INIT_WORK(&priv->gather_stats,
10619 (void (*)(void *))ipw_bg_gather_stats, priv);
10620 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10621 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10622 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10623 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10624 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10625 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10626 priv);
10627 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10628 priv);
10629 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10630 priv);
10631 INIT_WORK(&priv->merge_networks,
10632 (void (*)(void *))ipw_merge_adhoc_network, priv);
10633
10634 #ifdef CONFIG_IPW2200_QOS
10635 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10636 priv);
10637 #endif /* CONFIG_IPW2200_QOS */
10638
10639 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10640 ipw_irq_tasklet, (unsigned long)priv);
10641
10642 return ret;
10643 }
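/*
 * Note: this uses the pre-2.6.20 workqueue API, in which INIT_WORK() takes
 * an explicit (function, data) pair and the work handlers receive a void *
 * argument rather than a struct work_struct *.
 */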
10644
10645 static void shim__set_security(struct net_device *dev,
10646 struct ieee80211_security *sec)
10647 {
10648 struct ipw_priv *priv = ieee80211_priv(dev);
10649 int i;
10650 for (i = 0; i < 4; i++) {
10651 if (sec->flags & (1 << i)) {
10652 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10653 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10654 if (sec->key_sizes[i] == 0)
10655 priv->ieee->sec.flags &= ~(1 << i);
10656 else {
10657 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10658 sec->key_sizes[i]);
10659 priv->ieee->sec.flags |= (1 << i);
10660 }
10661 priv->status |= STATUS_SECURITY_UPDATED;
10662 } else if (sec->level != SEC_LEVEL_1)
10663 priv->ieee->sec.flags &= ~(1 << i);
10664 }
10665
10666 if (sec->flags & SEC_ACTIVE_KEY) {
10667 if (sec->active_key <= 3) {
10668 priv->ieee->sec.active_key = sec->active_key;
10669 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10670 } else
10671 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10672 priv->status |= STATUS_SECURITY_UPDATED;
10673 } else
10674 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10675
10676 if ((sec->flags & SEC_AUTH_MODE) &&
10677 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10678 priv->ieee->sec.auth_mode = sec->auth_mode;
10679 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10680 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10681 priv->capability |= CAP_SHARED_KEY;
10682 else
10683 priv->capability &= ~CAP_SHARED_KEY;
10684 priv->status |= STATUS_SECURITY_UPDATED;
10685 }
10686
10687 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10688 priv->ieee->sec.flags |= SEC_ENABLED;
10689 priv->ieee->sec.enabled = sec->enabled;
10690 priv->status |= STATUS_SECURITY_UPDATED;
10691 if (sec->enabled)
10692 priv->capability |= CAP_PRIVACY_ON;
10693 else
10694 priv->capability &= ~CAP_PRIVACY_ON;
10695 }
10696
10697 if (sec->flags & SEC_ENCRYPT)
10698 priv->ieee->sec.encrypt = sec->encrypt;
10699
10700 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10701 priv->ieee->sec.level = sec->level;
10702 priv->ieee->sec.flags |= SEC_LEVEL;
10703 priv->status |= STATUS_SECURITY_UPDATED;
10704 }
10705
10706 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10707 ipw_set_hwcrypto_keys(priv);
10708
10709 /* To match the current functionality of ipw2100 (which works well with
10710 * various supplicants), we don't force a disassociate if the
10711 * privacy capability changes ... */
10712 #if 0
10713 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10714 (((priv->assoc_request.capability &
10715 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10716 (!(priv->assoc_request.capability &
10717 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10718 IPW_DEBUG_ASSOC("Disassociating due to capability "
10719 "change.\n");
10720 ipw_disassociate(priv);
10721 }
10722 #endif
10723 }
10724
10725 static int init_supported_rates(struct ipw_priv *priv,
10726 struct ipw_supported_rates *rates)
10727 {
10728 /* TODO: Mask out rates based on priv->rates_mask */
10729
10730 memset(rates, 0, sizeof(*rates));
10731 /* configure supported rates */
10732 switch (priv->ieee->freq_band) {
10733 case IEEE80211_52GHZ_BAND:
10734 rates->ieee_mode = IPW_A_MODE;
10735 rates->purpose = IPW_RATE_CAPABILITIES;
10736 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10737 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10738 break;
10739
10740 default: /* Mixed or 2.4Ghz */
10741 rates->ieee_mode = IPW_G_MODE;
10742 rates->purpose = IPW_RATE_CAPABILITIES;
10743 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10744 IEEE80211_CCK_DEFAULT_RATES_MASK);
10745 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10746 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10747 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10748 }
10749 break;
10750 }
10751
10752 return 0;
10753 }
10754
10755 static int ipw_config(struct ipw_priv *priv)
10756 {
10757 /* This is only called from ipw_up, which resets/reloads the firmware,
10758 so we don't need to first disable the card before we configure
10759 it */
10760 if (ipw_set_tx_power(priv))
10761 goto error;
10762
10763 /* initialize adapter address */
10764 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10765 goto error;
10766
10767 /* set basic system config settings */
10768 init_sys_config(&priv->sys_config);
10769
10770 /* Support Bluetooth if we have BT h/w on board and the user wants to.
10771 * Does not support BT priority yet (don't abort or defer our Tx) */
10772 if (bt_coexist) {
10773 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10774
10775 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10776 priv->sys_config.bt_coexistence
10777 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10778 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10779 priv->sys_config.bt_coexistence
10780 |= CFG_BT_COEXISTENCE_OOB;
10781 }
10782
10783 #ifdef CONFIG_IPW2200_PROMISCUOUS
10784 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10785 priv->sys_config.accept_all_data_frames = 1;
10786 priv->sys_config.accept_non_directed_frames = 1;
10787 priv->sys_config.accept_all_mgmt_bcpr = 1;
10788 priv->sys_config.accept_all_mgmt_frames = 1;
10789 }
10790 #endif
10791
10792 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10793 priv->sys_config.answer_broadcast_ssid_probe = 1;
10794 else
10795 priv->sys_config.answer_broadcast_ssid_probe = 0;
10796
10797 if (ipw_send_system_config(priv))
10798 goto error;
10799
10800 init_supported_rates(priv, &priv->rates);
10801 if (ipw_send_supported_rates(priv, &priv->rates))
10802 goto error;
10803
10804 /* Set request-to-send threshold */
10805 if (priv->rts_threshold) {
10806 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10807 goto error;
10808 }
10809 #ifdef CONFIG_IPW2200_QOS
10810 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10811 ipw_qos_activate(priv, NULL);
10812 #endif /* CONFIG_IPW2200_QOS */
10813
10814 if (ipw_set_random_seed(priv))
10815 goto error;
10816
10817 /* final state transition to the RUN state */
10818 if (ipw_send_host_complete(priv))
10819 goto error;
10820
10821 priv->status |= STATUS_INIT;
10822
10823 ipw_led_init(priv);
10824 ipw_led_radio_on(priv);
10825 priv->notif_missed_beacons = 0;
10826
10827 /* Set hardware WEP key if it is configured. */
10828 if ((priv->capability & CAP_PRIVACY_ON) &&
10829 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10830 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10831 ipw_set_hwcrypto_keys(priv);
10832
10833 return 0;
10834
10835 error:
10836 return -EIO;
10837 }
10838
10839 /*
10840 * NOTE:
10841 *
10842 * These tables have been tested in conjunction with the
10843 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10844 *
10845 * Altering these values, using them on other hardware, or using them in
10846 * geographies not intended for resale of the above-mentioned Intel
10847 * adapters has not been tested.
10848 *
10849 * Remember to update the table in README.ipw2200 when changing this
10850 * table.
10851 *
10852 */
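/*
 * The geography actually used is chosen in ipw_up() below by matching the
 * three-character EEPROM country code (EEPROM_COUNTRY_CODE) against the
 * names in this table; an unrecognized code falls back to entry 0, the
 * restricted "---" set.
 */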
10853 static const struct ieee80211_geo ipw_geos[] = {
10854 { /* Restricted */
10855 "---",
10856 .bg_channels = 11,
10857 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10858 {2427, 4}, {2432, 5}, {2437, 6},
10859 {2442, 7}, {2447, 8}, {2452, 9},
10860 {2457, 10}, {2462, 11}},
10861 },
10862
10863 { /* Custom US/Canada */
10864 "ZZF",
10865 .bg_channels = 11,
10866 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10867 {2427, 4}, {2432, 5}, {2437, 6},
10868 {2442, 7}, {2447, 8}, {2452, 9},
10869 {2457, 10}, {2462, 11}},
10870 .a_channels = 8,
10871 .a = {{5180, 36},
10872 {5200, 40},
10873 {5220, 44},
10874 {5240, 48},
10875 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10876 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10877 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10878 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10879 },
10880
10881 { /* Rest of World */
10882 "ZZD",
10883 .bg_channels = 13,
10884 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10885 {2427, 4}, {2432, 5}, {2437, 6},
10886 {2442, 7}, {2447, 8}, {2452, 9},
10887 {2457, 10}, {2462, 11}, {2467, 12},
10888 {2472, 13}},
10889 },
10890
10891 { /* Custom USA & Europe & High */
10892 "ZZA",
10893 .bg_channels = 11,
10894 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10895 {2427, 4}, {2432, 5}, {2437, 6},
10896 {2442, 7}, {2447, 8}, {2452, 9},
10897 {2457, 10}, {2462, 11}},
10898 .a_channels = 13,
10899 .a = {{5180, 36},
10900 {5200, 40},
10901 {5220, 44},
10902 {5240, 48},
10903 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10904 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10905 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10906 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10907 {5745, 149},
10908 {5765, 153},
10909 {5785, 157},
10910 {5805, 161},
10911 {5825, 165}},
10912 },
10913
10914 { /* Custom NA & Europe */
10915 "ZZB",
10916 .bg_channels = 11,
10917 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10918 {2427, 4}, {2432, 5}, {2437, 6},
10919 {2442, 7}, {2447, 8}, {2452, 9},
10920 {2457, 10}, {2462, 11}},
10921 .a_channels = 13,
10922 .a = {{5180, 36},
10923 {5200, 40},
10924 {5220, 44},
10925 {5240, 48},
10926 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10927 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10928 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10929 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10930 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10931 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10932 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10933 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10934 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10935 },
10936
10937 { /* Custom Japan */
10938 "ZZC",
10939 .bg_channels = 11,
10940 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10941 {2427, 4}, {2432, 5}, {2437, 6},
10942 {2442, 7}, {2447, 8}, {2452, 9},
10943 {2457, 10}, {2462, 11}},
10944 .a_channels = 4,
10945 .a = {{5170, 34}, {5190, 38},
10946 {5210, 42}, {5230, 46}},
10947 },
10948
10949 { /* Custom */
10950 "ZZM",
10951 .bg_channels = 11,
10952 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10953 {2427, 4}, {2432, 5}, {2437, 6},
10954 {2442, 7}, {2447, 8}, {2452, 9},
10955 {2457, 10}, {2462, 11}},
10956 },
10957
10958 { /* Europe */
10959 "ZZE",
10960 .bg_channels = 13,
10961 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10962 {2427, 4}, {2432, 5}, {2437, 6},
10963 {2442, 7}, {2447, 8}, {2452, 9},
10964 {2457, 10}, {2462, 11}, {2467, 12},
10965 {2472, 13}},
10966 .a_channels = 19,
10967 .a = {{5180, 36},
10968 {5200, 40},
10969 {5220, 44},
10970 {5240, 48},
10971 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10972 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10973 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10974 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10975 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10976 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10977 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10978 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10979 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10980 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10981 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10982 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10983 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10984 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10985 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10986 },
10987
10988 { /* Custom Japan */
10989 "ZZJ",
10990 .bg_channels = 14,
10991 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10992 {2427, 4}, {2432, 5}, {2437, 6},
10993 {2442, 7}, {2447, 8}, {2452, 9},
10994 {2457, 10}, {2462, 11}, {2467, 12},
10995 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10996 .a_channels = 4,
10997 .a = {{5170, 34}, {5190, 38},
10998 {5210, 42}, {5230, 46}},
10999 },
11000
11001 { /* Rest of World */
11002 "ZZR",
11003 .bg_channels = 14,
11004 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11005 {2427, 4}, {2432, 5}, {2437, 6},
11006 {2442, 7}, {2447, 8}, {2452, 9},
11007 {2457, 10}, {2462, 11}, {2467, 12},
11008 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11009 IEEE80211_CH_PASSIVE_ONLY}},
11010 },
11011
11012 { /* High Band */
11013 "ZZH",
11014 .bg_channels = 13,
11015 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11016 {2427, 4}, {2432, 5}, {2437, 6},
11017 {2442, 7}, {2447, 8}, {2452, 9},
11018 {2457, 10}, {2462, 11},
11019 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11020 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11021 .a_channels = 4,
11022 .a = {{5745, 149}, {5765, 153},
11023 {5785, 157}, {5805, 161}},
11024 },
11025
11026 { /* Custom Europe */
11027 "ZZG",
11028 .bg_channels = 13,
11029 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11030 {2427, 4}, {2432, 5}, {2437, 6},
11031 {2442, 7}, {2447, 8}, {2452, 9},
11032 {2457, 10}, {2462, 11},
11033 {2467, 12}, {2472, 13}},
11034 .a_channels = 4,
11035 .a = {{5180, 36}, {5200, 40},
11036 {5220, 44}, {5240, 48}},
11037 },
11038
11039 { /* Europe */
11040 "ZZK",
11041 .bg_channels = 13,
11042 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11043 {2427, 4}, {2432, 5}, {2437, 6},
11044 {2442, 7}, {2447, 8}, {2452, 9},
11045 {2457, 10}, {2462, 11},
11046 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11047 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11048 .a_channels = 24,
11049 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11050 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11051 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11052 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11053 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11054 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11055 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11056 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11057 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11058 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11059 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11060 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11061 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11062 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11063 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11064 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11065 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11066 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11067 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11068 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11069 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11070 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11071 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11072 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11073 },
11074
11075 { /* Europe */
11076 "ZZL",
11077 .bg_channels = 11,
11078 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11079 {2427, 4}, {2432, 5}, {2437, 6},
11080 {2442, 7}, {2447, 8}, {2452, 9},
11081 {2457, 10}, {2462, 11}},
11082 .a_channels = 13,
11083 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11084 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11085 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11086 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11087 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11088 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11089 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11090 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11091 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11092 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11093 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11094 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11095 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11096 }
11097 };
11098
11099 #define MAX_HW_RESTARTS 5
11100 static int ipw_up(struct ipw_priv *priv)
11101 {
11102 int rc, i, j;
11103
11104 if (priv->status & STATUS_EXIT_PENDING)
11105 return -EIO;
11106
11107 if (cmdlog && !priv->cmdlog) {
11108 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
11109 GFP_KERNEL);
11110 if (priv->cmdlog == NULL) {
11111 IPW_ERROR("Error allocating %d command log entries.\n",
11112 cmdlog);
11113 return -ENOMEM;
11114 } else {
11115 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
11116 priv->cmdlog_len = cmdlog;
11117 }
11118 }
11119
11120 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11121 /* Load the microcode, firmware, and eeprom.
11122 * Also start the clocks. */
11123 rc = ipw_load(priv);
11124 if (rc) {
11125 IPW_ERROR("Unable to load firmware: %d\n", rc);
11126 return rc;
11127 }
11128
11129 ipw_init_ordinals(priv);
11130 if (!(priv->config & CFG_CUSTOM_MAC))
11131 eeprom_parse_mac(priv, priv->mac_addr);
11132 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11133
11134 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11135 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11136 ipw_geos[j].name, 3))
11137 break;
11138 }
11139 if (j == ARRAY_SIZE(ipw_geos)) {
11140 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11141 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11142 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11143 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11144 j = 0;
11145 }
11146 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11147 IPW_WARNING("Could not set geography.");
11148 return 0;
11149 }
11150
11151 if (priv->status & STATUS_RF_KILL_SW) {
11152 IPW_WARNING("Radio disabled by module parameter.\n");
11153 return 0;
11154 } else if (rf_kill_active(priv)) {
11155 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11156 "Kill switch must be turned off for "
11157 "wireless networking to work.\n");
11158 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11159 2 * HZ);
11160 return 0;
11161 }
11162
11163 rc = ipw_config(priv);
11164 if (!rc) {
11165 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11166
11167                         /* If configured to try and auto-associate, kick
11168 * off a scan. */
11169 queue_work(priv->workqueue, &priv->request_scan);
11170
11171 return 0;
11172 }
11173
11174 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11175 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11176 i, MAX_HW_RESTARTS);
11177
11178 /* We had an error bringing up the hardware, so take it
11179 * all the way back down so we can try again */
11180 ipw_down(priv);
11181 }
11182
11183         /* We tried to restart and configure the device for as long as our
11184          * patience could withstand. */
11185 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11186
11187 return -EIO;
11188 }
11189
11190 static void ipw_bg_up(void *data)
11191 {
11192 struct ipw_priv *priv = data;
11193 mutex_lock(&priv->mutex);
11194 ipw_up(data);
11195 mutex_unlock(&priv->mutex);
11196 }
11197
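/*
 * Orderly teardown of the active state: abort any scan in progress,
 * disassociate, shut the LEDs down, poll briefly for the scanning and
 * association status bits to clear, then ask the card to disable itself.
 */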
11198 static void ipw_deinit(struct ipw_priv *priv)
11199 {
11200 int i;
11201
11202 if (priv->status & STATUS_SCANNING) {
11203 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11204 ipw_abort_scan(priv);
11205 }
11206
11207 if (priv->status & STATUS_ASSOCIATED) {
11208 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11209 ipw_disassociate(priv);
11210 }
11211
11212 ipw_led_shutdown(priv);
11213
11214         /* Wait for the status to change to not scanning and not
11215          * associated (disassociation can take a while for a full
11216          * 802.11 exchange). */
11217 for (i = 1000; i && (priv->status &
11218 (STATUS_DISASSOCIATING |
11219 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11220 udelay(10);
11221
11222 if (priv->status & (STATUS_DISASSOCIATING |
11223 STATUS_ASSOCIATED | STATUS_SCANNING))
11224 IPW_DEBUG_INFO("Still associated or scanning...\n");
11225 else
11226 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11227
11228 /* Attempt to disable the card */
11229 ipw_send_card_disable(priv, 0);
11230
11231 priv->status &= ~STATUS_INIT;
11232 }
11233
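/*
 * Take the interface all the way down: run ipw_deinit() if the device was
 * initialized, mask device interrupts, stop the TX queue and the NIC, and
 * turn the radio LED off.  Only the RF-kill bits (and EXIT_PENDING during
 * module unload) survive in priv->status.
 */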
11234 static void ipw_down(struct ipw_priv *priv)
11235 {
11236 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11237
11238 priv->status |= STATUS_EXIT_PENDING;
11239
11240 if (ipw_is_init(priv))
11241 ipw_deinit(priv);
11242
11243 /* Wipe out the EXIT_PENDING status bit if we are not actually
11244 * exiting the module */
11245 if (!exit_pending)
11246 priv->status &= ~STATUS_EXIT_PENDING;
11247
11248 /* tell the device to stop sending interrupts */
11249 ipw_disable_interrupts(priv);
11250
11251 /* Clear all bits but the RF Kill */
11252 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11253 netif_carrier_off(priv->net_dev);
11254 netif_stop_queue(priv->net_dev);
11255
11256 ipw_stop_nic(priv);
11257
11258 ipw_led_radio_off(priv);
11259 }
11260
11261 static void ipw_bg_down(void *data)
11262 {
11263 struct ipw_priv *priv = data;
11264 mutex_lock(&priv->mutex);
11265 ipw_down(data);
11266 mutex_unlock(&priv->mutex);
11267 }
11268
11269 /* Called by register_netdev() */
11270 static int ipw_net_init(struct net_device *dev)
11271 {
11272 struct ipw_priv *priv = ieee80211_priv(dev);
11273 mutex_lock(&priv->mutex);
11274
11275 if (ipw_up(priv)) {
11276 mutex_unlock(&priv->mutex);
11277 return -EIO;
11278 }
11279
11280 mutex_unlock(&priv->mutex);
11281 return 0;
11282 }
11283
11284 /* PCI driver stuff */
11285 static struct pci_device_id card_ids[] = {
11286 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11287 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11288 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11289 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11290 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11291 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11292 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11293 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11294 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11295 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11296 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11297 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11298 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11299 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11300 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11301 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11302 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11303 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11304 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11305 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11306 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11307 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11308
11309 /* required last entry */
11310 {0,}
11311 };
11312
11313 MODULE_DEVICE_TABLE(pci, card_ids);
11314
11315 static struct attribute *ipw_sysfs_entries[] = {
11316 &dev_attr_rf_kill.attr,
11317 &dev_attr_direct_dword.attr,
11318 &dev_attr_indirect_byte.attr,
11319 &dev_attr_indirect_dword.attr,
11320 &dev_attr_mem_gpio_reg.attr,
11321 &dev_attr_command_event_reg.attr,
11322 &dev_attr_nic_type.attr,
11323 &dev_attr_status.attr,
11324 &dev_attr_cfg.attr,
11325 &dev_attr_error.attr,
11326 &dev_attr_event_log.attr,
11327 &dev_attr_cmd_log.attr,
11328 &dev_attr_eeprom_delay.attr,
11329 &dev_attr_ucode_version.attr,
11330 &dev_attr_rtc.attr,
11331 &dev_attr_scan_age.attr,
11332 &dev_attr_led.attr,
11333 &dev_attr_speed_scan.attr,
11334 &dev_attr_net_stats.attr,
11335 #ifdef CONFIG_IPW2200_PROMISCUOUS
11336 &dev_attr_rtap_iface.attr,
11337 &dev_attr_rtap_filter.attr,
11338 #endif
11339 NULL
11340 };
11341
11342 static struct attribute_group ipw_attribute_group = {
11343 .name = NULL, /* put in device directory */
11344 .attrs = ipw_sysfs_entries,
11345 };
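/*
 * The attributes above appear directly in the PCI device's sysfs directory
 * once sysfs_create_group() succeeds in ipw_pci_probe(), e.g. (path shown
 * only as an illustration) /sys/bus/pci/devices/0000:02:02.0/rf_kill.
 */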
11346
11347 #ifdef CONFIG_IPW2200_PROMISCUOUS
11348 static int ipw_prom_open(struct net_device *dev)
11349 {
11350 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11351 struct ipw_priv *priv = prom_priv->priv;
11352
11353 IPW_DEBUG_INFO("prom dev->open\n");
11354 netif_carrier_off(dev);
11355 netif_stop_queue(dev);
11356
11357 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11358 priv->sys_config.accept_all_data_frames = 1;
11359 priv->sys_config.accept_non_directed_frames = 1;
11360 priv->sys_config.accept_all_mgmt_bcpr = 1;
11361 priv->sys_config.accept_all_mgmt_frames = 1;
11362
11363 ipw_send_system_config(priv);
11364 }
11365
11366 return 0;
11367 }
11368
11369 static int ipw_prom_stop(struct net_device *dev)
11370 {
11371 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11372 struct ipw_priv *priv = prom_priv->priv;
11373
11374 IPW_DEBUG_INFO("prom dev->stop\n");
11375
11376 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11377 priv->sys_config.accept_all_data_frames = 0;
11378 priv->sys_config.accept_non_directed_frames = 0;
11379 priv->sys_config.accept_all_mgmt_bcpr = 0;
11380 priv->sys_config.accept_all_mgmt_frames = 0;
11381
11382 ipw_send_system_config(priv);
11383 }
11384
11385 return 0;
11386 }
11387
11388 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11389 {
11390 IPW_DEBUG_INFO("prom dev->xmit\n");
11391 netif_stop_queue(dev);
11392 return -EOPNOTSUPP;
11393 }
11394
11395 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11396 {
11397 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11398 return &prom_priv->ieee->stats;
11399 }
11400
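/*
 * Allocate and register the optional "rtap%d" promiscuous companion device.
 * It is a radiotap (monitor mode) interface that only delivers received
 * frames; transmitting on it is rejected in ipw_prom_hard_start_xmit().
 */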
11401 static int ipw_prom_alloc(struct ipw_priv *priv)
11402 {
11403 int rc = 0;
11404
11405 if (priv->prom_net_dev)
11406 return -EPERM;
11407
11408 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11409 if (priv->prom_net_dev == NULL)
11410 return -ENOMEM;
11411
11412 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11413 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11414 priv->prom_priv->priv = priv;
11415
11416 strcpy(priv->prom_net_dev->name, "rtap%d");
11417
11418 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11419 priv->prom_net_dev->open = ipw_prom_open;
11420 priv->prom_net_dev->stop = ipw_prom_stop;
11421 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11422 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11423
11424 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11425
11426 rc = register_netdev(priv->prom_net_dev);
11427 if (rc) {
11428 free_ieee80211(priv->prom_net_dev);
11429 priv->prom_net_dev = NULL;
11430 return rc;
11431 }
11432
11433 return 0;
11434 }
11435
11436 static void ipw_prom_free(struct ipw_priv *priv)
11437 {
11438 if (!priv->prom_net_dev)
11439 return;
11440
11441 unregister_netdev(priv->prom_net_dev);
11442 free_ieee80211(priv->prom_net_dev);
11443
11444 priv->prom_net_dev = NULL;
11445 }
11446
11447 #endif
11448
11449
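/*
 * PCI probe: allocate the ieee80211/net_device pair, enable the PCI device
 * and 32-bit DMA, map BAR 0, set up the deferred work items and the shared
 * IRQ, wire up the net_device and wireless handlers, then register the
 * sysfs attribute group and the network device (plus the optional rtap
 * device when promiscuous support is enabled).
 */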
11450 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11451 {
11452 int err = 0;
11453 struct net_device *net_dev;
11454 void __iomem *base;
11455 u32 length, val;
11456 struct ipw_priv *priv;
11457 int i;
11458
11459 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11460 if (net_dev == NULL) {
11461 err = -ENOMEM;
11462 goto out;
11463 }
11464
11465 priv = ieee80211_priv(net_dev);
11466 priv->ieee = netdev_priv(net_dev);
11467
11468 priv->net_dev = net_dev;
11469 priv->pci_dev = pdev;
11470 #ifdef CONFIG_IPW2200_DEBUG
11471 ipw_debug_level = debug;
11472 #endif
11473 spin_lock_init(&priv->irq_lock);
11474 spin_lock_init(&priv->lock);
11475 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11476 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11477
11478 mutex_init(&priv->mutex);
11479 if (pci_enable_device(pdev)) {
11480 err = -ENODEV;
11481 goto out_free_ieee80211;
11482 }
11483
11484 pci_set_master(pdev);
11485
11486 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11487 if (!err)
11488 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11489 if (err) {
11490 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11491 goto out_pci_disable_device;
11492 }
11493
11494 pci_set_drvdata(pdev, priv);
11495
11496 err = pci_request_regions(pdev, DRV_NAME);
11497 if (err)
11498 goto out_pci_disable_device;
11499
11500 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11501 * PCI Tx retries from interfering with C3 CPU state */
11502 pci_read_config_dword(pdev, 0x40, &val);
11503 if ((val & 0x0000ff00) != 0)
11504 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11505
11506 length = pci_resource_len(pdev, 0);
11507 priv->hw_len = length;
11508
11509 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11510 if (!base) {
11511 err = -ENODEV;
11512 goto out_pci_release_regions;
11513 }
11514
11515 priv->hw_base = base;
11516 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11517 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11518
11519 err = ipw_setup_deferred_work(priv);
11520 if (err) {
11521 IPW_ERROR("Unable to setup deferred work\n");
11522 goto out_iounmap;
11523 }
11524
11525 ipw_sw_reset(priv, 1);
11526
11527 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11528 if (err) {
11529 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11530 goto out_destroy_workqueue;
11531 }
11532
11533 SET_MODULE_OWNER(net_dev);
11534 SET_NETDEV_DEV(net_dev, &pdev->dev);
11535
11536 mutex_lock(&priv->mutex);
11537
11538 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11539 priv->ieee->set_security = shim__set_security;
11540 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11541
11542 #ifdef CONFIG_IPW2200_QOS
11543 priv->ieee->is_qos_active = ipw_is_qos_active;
11544 priv->ieee->handle_probe_response = ipw_handle_beacon;
11545 priv->ieee->handle_beacon = ipw_handle_probe_response;
11546 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11547 #endif /* CONFIG_IPW2200_QOS */
11548
11549 priv->ieee->perfect_rssi = -20;
11550 priv->ieee->worst_rssi = -85;
11551
11552 net_dev->open = ipw_net_open;
11553 net_dev->stop = ipw_net_stop;
11554 net_dev->init = ipw_net_init;
11555 net_dev->get_stats = ipw_net_get_stats;
11556 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11557 net_dev->set_mac_address = ipw_net_set_mac_address;
11558 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11559 net_dev->wireless_data = &priv->wireless_data;
11560 net_dev->wireless_handlers = &ipw_wx_handler_def;
11561 net_dev->ethtool_ops = &ipw_ethtool_ops;
11562 net_dev->irq = pdev->irq;
11563 net_dev->base_addr = (unsigned long)priv->hw_base;
11564 net_dev->mem_start = pci_resource_start(pdev, 0);
11565 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11566
11567 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11568 if (err) {
11569 IPW_ERROR("failed to create sysfs device attributes\n");
11570 mutex_unlock(&priv->mutex);
11571 goto out_release_irq;
11572 }
11573
11574 mutex_unlock(&priv->mutex);
11575 err = register_netdev(net_dev);
11576 if (err) {
11577 IPW_ERROR("failed to register network device\n");
11578 goto out_remove_sysfs;
11579 }
11580
11581 #ifdef CONFIG_IPW2200_PROMISCUOUS
11582 if (rtap_iface) {
11583 err = ipw_prom_alloc(priv);
11584 if (err) {
11585 IPW_ERROR("Failed to register promiscuous network "
11586 "device (error %d).\n", err);
11587 unregister_netdev(priv->net_dev);
11588 goto out_remove_sysfs;
11589 }
11590 }
11591 #endif
11592
11593 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11594 "channels, %d 802.11a channels)\n",
11595 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11596 priv->ieee->geo.a_channels);
11597
11598 return 0;
11599
11600 out_remove_sysfs:
11601 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11602 out_release_irq:
11603 free_irq(pdev->irq, priv);
11604 out_destroy_workqueue:
11605 destroy_workqueue(priv->workqueue);
11606 priv->workqueue = NULL;
11607 out_iounmap:
11608 iounmap(priv->hw_base);
11609 out_pci_release_regions:
11610 pci_release_regions(pdev);
11611 out_pci_disable_device:
11612 pci_disable_device(pdev);
11613 pci_set_drvdata(pdev, NULL);
11614 out_free_ieee80211:
11615 free_ieee80211(priv->net_dev);
11616 out:
11617 return err;
11618 }
11619
11620 static void ipw_pci_remove(struct pci_dev *pdev)
11621 {
11622 struct ipw_priv *priv = pci_get_drvdata(pdev);
11623 struct list_head *p, *q;
11624 int i;
11625
11626 if (!priv)
11627 return;
11628
11629 mutex_lock(&priv->mutex);
11630
11631 priv->status |= STATUS_EXIT_PENDING;
11632 ipw_down(priv);
11633 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11634
11635 mutex_unlock(&priv->mutex);
11636
11637 unregister_netdev(priv->net_dev);
11638
11639 if (priv->rxq) {
11640 ipw_rx_queue_free(priv, priv->rxq);
11641 priv->rxq = NULL;
11642 }
11643 ipw_tx_queue_free(priv);
11644
11645 if (priv->cmdlog) {
11646 kfree(priv->cmdlog);
11647 priv->cmdlog = NULL;
11648 }
11649 /* ipw_down will ensure that there is no more pending work
11650          * in the workqueues, so we can safely remove them now. */
11651 cancel_delayed_work(&priv->adhoc_check);
11652 cancel_delayed_work(&priv->gather_stats);
11653 cancel_delayed_work(&priv->request_scan);
11654 cancel_delayed_work(&priv->rf_kill);
11655 cancel_delayed_work(&priv->scan_check);
11656 destroy_workqueue(priv->workqueue);
11657 priv->workqueue = NULL;
11658
11659 /* Free MAC hash list for ADHOC */
11660 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11661 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11662 list_del(p);
11663 kfree(list_entry(p, struct ipw_ibss_seq, list));
11664 }
11665 }
11666
11667 kfree(priv->error);
11668 priv->error = NULL;
11669
11670 #ifdef CONFIG_IPW2200_PROMISCUOUS
11671 ipw_prom_free(priv);
11672 #endif
11673
11674 free_irq(pdev->irq, priv);
11675 iounmap(priv->hw_base);
11676 pci_release_regions(pdev);
11677 pci_disable_device(pdev);
11678 pci_set_drvdata(pdev, NULL);
11679 free_ieee80211(priv->net_dev);
11680 free_firmware();
11681 }
11682
11683 #ifdef CONFIG_PM
11684 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11685 {
11686 struct ipw_priv *priv = pci_get_drvdata(pdev);
11687 struct net_device *dev = priv->net_dev;
11688
11689 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11690
11691 /* Take down the device; powers it off, etc. */
11692 ipw_down(priv);
11693
11694 /* Remove the PRESENT state of the device */
11695 netif_device_detach(dev);
11696
11697 pci_save_state(pdev);
11698 pci_disable_device(pdev);
11699 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11700
11701 return 0;
11702 }
11703
11704 static int ipw_pci_resume(struct pci_dev *pdev)
11705 {
11706 struct ipw_priv *priv = pci_get_drvdata(pdev);
11707 struct net_device *dev = priv->net_dev;
11708 u32 val;
11709
11710 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11711
11712 pci_set_power_state(pdev, PCI_D0);
11713 pci_enable_device(pdev);
11714 pci_restore_state(pdev);
11715
11716 /*
11717 * Suspend/Resume resets the PCI configuration space, so we have to
11718 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11719 * from interfering with C3 CPU state. pci_restore_state won't help
11720          * here since it only restores the first 64 bytes of the PCI config header.
11721 */
11722 pci_read_config_dword(pdev, 0x40, &val);
11723 if ((val & 0x0000ff00) != 0)
11724 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11725
11726 /* Set the device back into the PRESENT state; this will also wake
11727          * the queue if needed. */
11728 netif_device_attach(dev);
11729
11730 /* Bring the device back up */
11731 queue_work(priv->workqueue, &priv->up);
11732
11733 return 0;
11734 }
11735 #endif
11736
11737 /* driver initialization stuff */
11738 static struct pci_driver ipw_driver = {
11739 .name = DRV_NAME,
11740 .id_table = card_ids,
11741 .probe = ipw_pci_probe,
11742 .remove = __devexit_p(ipw_pci_remove),
11743 #ifdef CONFIG_PM
11744 .suspend = ipw_pci_suspend,
11745 .resume = ipw_pci_resume,
11746 #endif
11747 };
11748
11749 static int __init ipw_init(void)
11750 {
11751 int ret;
11752
11753 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11754 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11755
11756 ret = pci_module_init(&ipw_driver);
11757 if (ret) {
11758 IPW_ERROR("Unable to initialize PCI module\n");
11759 return ret;
11760 }
11761
11762 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11763 if (ret) {
11764 IPW_ERROR("Unable to create driver sysfs file\n");
11765 pci_unregister_driver(&ipw_driver);
11766 return ret;
11767 }
11768
11769 return ret;
11770 }
11771
11772 static void __exit ipw_exit(void)
11773 {
11774 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11775 pci_unregister_driver(&ipw_driver);
11776 }
11777
11778 module_param(disable, int, 0444);
11779 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11780
11781 module_param(associate, int, 0444);
11782 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11783
11784 module_param(auto_create, int, 0444);
11785 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11786
11787 module_param(led, int, 0444);
11788 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11789
11790 #ifdef CONFIG_IPW2200_DEBUG
11791 module_param(debug, int, 0444);
11792 MODULE_PARM_DESC(debug, "debug output mask");
11793 #endif
11794
11795 module_param(channel, int, 0444);
11796 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
11797
11798 #ifdef CONFIG_IPW2200_PROMISCUOUS
11799 module_param(rtap_iface, int, 0444);
11800 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11801 #endif
11802
11803 #ifdef CONFIG_IPW2200_QOS
11804 module_param(qos_enable, int, 0444);
11805 MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
11806
11807 module_param(qos_burst_enable, int, 0444);
11808 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11809
11810 module_param(qos_no_ack_mask, int, 0444);
11811 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11812
11813 module_param(burst_duration_CCK, int, 0444);
11814 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11815
11816 module_param(burst_duration_OFDM, int, 0444);
11817 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11818 #endif /* CONFIG_IPW2200_QOS */
11819
11820 #ifdef CONFIG_IPW2200_MONITOR
11821 module_param(mode, int, 0444);
11822 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11823 #else
11824 module_param(mode, int, 0444);
11825 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11826 #endif
11827
11828 module_param(bt_coexist, int, 0444);
11829 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11830
11831 module_param(hwcrypto, int, 0444);
11832 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11833
11834 module_param(cmdlog, int, 0444);
11835 MODULE_PARM_DESC(cmdlog,
11836 "allocate a ring buffer for logging firmware commands");
11837
11838 module_param(roaming, int, 0444);
11839 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11840
11841 module_param(antenna, int, 0444);
11842 MODULE_PARM_DESC(antenna, "select antenna: 0=both [default], 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
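/*
 * Illustrative module load (parameter values are examples only):
 *   modprobe ipw2200 associate=0 channel=6 led=1
 * Parameters guarded by CONFIG_IPW2200_* options above (e.g. debug,
 * rtap_iface, the QoS settings) are only available when those options
 * are enabled.
 */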
11843
11844 module_exit(ipw_exit);
11845 module_init(ipw_init);