1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
85 static int cmdlog = 0;
86 #ifdef CONFIG_IPW2200_DEBUG
87 static int debug = 0;
88 #endif
89 static int channel = 0;
90 static int mode = 0;
91
92 static u32 ipw_debug_level;
93 static int associate = 1;
94 static int auto_create = 1;
95 static int led = 0;
96 static int disable = 0;
97 static int bt_coexist = 0;
98 static int hwcrypto = 0;
99 static int roaming = 1;
100 static const char ipw_modes[] = {
101 'a', 'b', 'g', '?'
102 };
103 static int antenna = CFG_SYS_ANTENNA_BOTH;
104
105 #ifdef CONFIG_IPW2200_PROMISCUOUS
106 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
107 #endif
108
109
110 #ifdef CONFIG_IPW2200_QOS
111 static int qos_enable = 0;
112 static int qos_burst_enable = 0;
113 static int qos_no_ack_mask = 0;
114 static int burst_duration_CCK = 0;
115 static int burst_duration_OFDM = 0;
116
117 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
118 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
119 QOS_TX3_CW_MIN_OFDM},
120 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
121 QOS_TX3_CW_MAX_OFDM},
122 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
123 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
124 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
125 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
126 };
127
128 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
129 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
130 QOS_TX3_CW_MIN_CCK},
131 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
132 QOS_TX3_CW_MAX_CCK},
133 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
134 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
135 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
136 QOS_TX3_TXOP_LIMIT_CCK}
137 };
138
139 static struct ieee80211_qos_parameters def_parameters_OFDM = {
140 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
141 DEF_TX3_CW_MIN_OFDM},
142 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
143 DEF_TX3_CW_MAX_OFDM},
144 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
145 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
146 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
147 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
148 };
149
150 static struct ieee80211_qos_parameters def_parameters_CCK = {
151 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
152 DEF_TX3_CW_MIN_CCK},
153 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
154 DEF_TX3_CW_MAX_CCK},
155 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
156 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
157 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
158 DEF_TX3_TXOP_LIMIT_CCK}
159 };
160
161 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
162
163 static int from_priority_to_tx_queue[] = {
164 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
165 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
166 };
167
168 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
169
170 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
171 *qos_param);
172 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
173 *qos_param);
174 #endif /* CONFIG_IPW2200_QOS */
175
176 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
177 static void ipw_remove_current_network(struct ipw_priv *priv);
178 static void ipw_rx(struct ipw_priv *priv);
179 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
180 struct clx2_tx_queue *txq, int qindex);
181 static int ipw_queue_reset(struct ipw_priv *priv);
182
183 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
184 int len, int sync);
185
186 static void ipw_tx_queue_free(struct ipw_priv *);
187
188 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
189 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
190 static void ipw_rx_queue_replenish(void *);
191 static int ipw_up(struct ipw_priv *);
192 static void ipw_bg_up(void *);
193 static void ipw_down(struct ipw_priv *);
194 static void ipw_bg_down(void *);
195 static int ipw_config(struct ipw_priv *);
196 static int init_supported_rates(struct ipw_priv *priv,
197 struct ipw_supported_rates *prates);
198 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
199 static void ipw_send_wep_keys(struct ipw_priv *, int);
200
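/* Format one line of a hex dump into 'buf': the offset, up to 16 bytes of
 * 'data' as hex (two groups of eight), then their printable-ASCII form.
 * Returns the number of characters written. */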
201 static int snprint_line(char *buf, size_t count,
202 const u8 * data, u32 len, u32 ofs)
203 {
204 int out, i, j, l;
205 char c;
206
207 out = snprintf(buf, count, "%08X", ofs);
208
209 for (l = 0, i = 0; i < 2; i++) {
210 out += snprintf(buf + out, count - out, " ");
211 for (j = 0; j < 8 && l < len; j++, l++)
212 out += snprintf(buf + out, count - out, "%02X ",
213 data[(i * 8 + j)]);
214 for (; j < 8; j++)
215 out += snprintf(buf + out, count - out, " ");
216 }
217
218 out += snprintf(buf + out, count - out, " ");
219 for (l = 0, i = 0; i < 2; i++) {
220 out += snprintf(buf + out, count - out, " ");
221 for (j = 0; j < 8 && l < len; j++, l++) {
222 c = data[(i * 8 + j)];
223 if (!isascii(c) || !isprint(c))
224 c = '.';
225
226 out += snprintf(buf + out, count - out, "%c", c);
227 }
228
229 for (; j < 8; j++)
230 out += snprintf(buf + out, count - out, " ");
231 }
232
233 return out;
234 }
235
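/* Dump 'len' bytes of 'data' to the kernel log as 16-byte hex/ASCII lines,
 * but only if 'level' is enabled in ipw_debug_level. */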
236 static void printk_buf(int level, const u8 * data, u32 len)
237 {
238 char line[81];
239 u32 ofs = 0;
240 if (!(ipw_debug_level & level))
241 return;
242
243 while (len) {
244 snprint_line(line, sizeof(line), &data[ofs],
245 min(len, 16U), ofs);
246 printk(KERN_DEBUG "%s\n", line);
247 ofs += 16;
248 len -= min(len, 16U);
249 }
250 }
251
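/* As printk_buf(), but format the hex/ASCII dump into a caller-supplied
 * buffer. Returns the total number of characters written. */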
252 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
253 {
254 size_t out = size;
255 u32 ofs = 0;
256 int total = 0;
257
258 while (size && len) {
259 out = snprint_line(output, size, &data[ofs],
260 min_t(size_t, len, 16U), ofs);
261
262 ofs += 16;
263 output += out;
264 size -= out;
265 len -= min_t(size_t, len, 16U);
266 total += out;
267 }
268 return total;
269 }
270
271 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
272 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
273 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
274
275 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
276 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
277 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
278
279 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
280 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
281 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
282 {
283 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
284 __LINE__, (u32) (b), (u32) (c));
285 _ipw_write_reg8(a, b, c);
286 }
287
288 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
289 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
290 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
291 {
292 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
293 __LINE__, (u32) (b), (u32) (c));
294 _ipw_write_reg16(a, b, c);
295 }
296
297 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
298 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
299 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
300 {
301 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
302 __LINE__, (u32) (b), (u32) (c));
303 _ipw_write_reg32(a, b, c);
304 }
305
306 /* 8-bit direct write (low 4K) */
307 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
308
309 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
310 #define ipw_write8(ipw, ofs, val) \
311 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
312 _ipw_write8(ipw, ofs, val)
313
314 /* 16-bit direct write (low 4K) */
315 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
316
317 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
318 #define ipw_write16(ipw, ofs, val) \
319 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
320 _ipw_write16(ipw, ofs, val)
321
322 /* 32-bit direct write (low 4K) */
323 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
324
325 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
326 #define ipw_write32(ipw, ofs, val) \
327 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
328 _ipw_write32(ipw, ofs, val)
329
330 /* 8-bit direct read (low 4K) */
331 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
332
333 /* 8-bit direct read (low 4K), with debug wrapper */
334 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
335 {
336 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
337 return _ipw_read8(ipw, ofs);
338 }
339
340 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
341 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
342
343 /* 16-bit direct read (low 4K) */
344 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
345
346 /* 16-bit direct read (low 4K), with debug wrapper */
347 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
348 {
349 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
350 return _ipw_read16(ipw, ofs);
351 }
352
353 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
354 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
355
356 /* 32-bit direct read (low 4K) */
357 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
358
359 /* 32-bit direct read (low 4K), with debug wrapper */
360 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
361 {
362 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
363 return _ipw_read32(ipw, ofs);
364 }
365
366 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
367 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
368
369 /* multi-byte read (above 4K), with debug wrapper */
370 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
371 static inline void __ipw_read_indirect(const char *f, int l,
372 struct ipw_priv *a, u32 b, u8 * c, int d)
373 {
374 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
375 d);
376 _ipw_read_indirect(a, b, c, d);
377 }
378
379 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
380 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
381
382 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
383 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
384 int num);
385 #define ipw_write_indirect(a, b, c, d) \
386 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
387 _ipw_write_indirect(a, b, c, d)
388
389 /* 32-bit indirect write (above 4K) */
390 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
391 {
392 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
393 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
394 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 }
396
397 /* 8-bit indirect write (above 4K) */
398 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
399 {
400 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
401 u32 dif_len = reg - aligned_addr;
402
403 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
404 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
405 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 }
407
408 /* 16-bit indirect write (above 4K) */
409 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
410 {
411 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
412 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
413
414 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
415 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
416 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 }
418
419 /* 8-bit indirect read (above 4K) */
420 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
421 {
422 u32 word;
423 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
424 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
425 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
426 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 }
428
429 /* 32-bit indirect read (above 4K) */
430 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
431 {
432 u32 value;
433
434 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
435
436 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
437 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
438 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
439 return value;
440 }
441
442 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
443 /* for area above 1st 4K of SRAM/reg space */
444 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
445 int num)
446 {
447 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
448 u32 dif_len = addr - aligned_addr;
449 u32 i;
450
451 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
452
453 if (num <= 0) {
454 return;
455 }
456
457 /* Read the first dword (or portion) byte by byte */
458 if (unlikely(dif_len)) {
459 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
460 /* Start reading at aligned_addr + dif_len */
461 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
462 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
463 aligned_addr += 4;
464 }
465
466 /* Read all of the middle dwords as dwords, with auto-increment */
467 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
468 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
469 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
470
471 /* Read the last dword (or portion) byte by byte */
472 if (unlikely(num)) {
473 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
474 for (i = 0; num > 0; i++, num--)
475 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
476 }
477 }
478
479 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
480 /* for area above 1st 4K of SRAM/reg space */
481 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
482 int num)
483 {
484 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
485 u32 dif_len = addr - aligned_addr;
486 u32 i;
487
488 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
489
490 if (num <= 0) {
491 return;
492 }
493
494 /* Write the first dword (or portion) byte by byte */
495 if (unlikely(dif_len)) {
496 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
497 /* Start writing at aligned_addr + dif_len */
498 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
499 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
500 aligned_addr += 4;
501 }
502
503 /* Write all of the middle dwords as dwords, with auto-increment */
504 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
505 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
506 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
507
508 /* Write the last dword (or portion) byte by byte */
509 if (unlikely(num)) {
510 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
511 for (i = 0; num > 0; i++, num--, buf++)
512 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
513 }
514 }
515
516 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
517 /* for 1st 4K of SRAM/regs space */
518 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
519 int num)
520 {
521 memcpy_toio((priv->hw_base + addr), buf, num);
522 }
523
524 /* Set bit(s) in low 4K of SRAM/regs */
525 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
526 {
527 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 }
529
530 /* Clear bit(s) in low 4K of SRAM/regs */
531 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
532 {
533 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 }
535
536 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
537 {
538 if (priv->status & STATUS_INT_ENABLED)
539 return;
540 priv->status |= STATUS_INT_ENABLED;
541 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 }
543
544 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
545 {
546 if (!(priv->status & STATUS_INT_ENABLED))
547 return;
548 priv->status &= ~STATUS_INT_ENABLED;
549 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 }
551
552 #ifdef CONFIG_IPW2200_DEBUG
553 static char *ipw_error_desc(u32 val)
554 {
555 switch (val) {
556 case IPW_FW_ERROR_OK:
557 return "ERROR_OK";
558 case IPW_FW_ERROR_FAIL:
559 return "ERROR_FAIL";
560 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
561 return "MEMORY_UNDERFLOW";
562 case IPW_FW_ERROR_MEMORY_OVERFLOW:
563 return "MEMORY_OVERFLOW";
564 case IPW_FW_ERROR_BAD_PARAM:
565 return "BAD_PARAM";
566 case IPW_FW_ERROR_BAD_CHECKSUM:
567 return "BAD_CHECKSUM";
568 case IPW_FW_ERROR_NMI_INTERRUPT:
569 return "NMI_INTERRUPT";
570 case IPW_FW_ERROR_BAD_DATABASE:
571 return "BAD_DATABASE";
572 case IPW_FW_ERROR_ALLOC_FAIL:
573 return "ALLOC_FAIL";
574 case IPW_FW_ERROR_DMA_UNDERRUN:
575 return "DMA_UNDERRUN";
576 case IPW_FW_ERROR_DMA_STATUS:
577 return "DMA_STATUS";
578 case IPW_FW_ERROR_DINO_ERROR:
579 return "DINO_ERROR";
580 case IPW_FW_ERROR_EEPROM_ERROR:
581 return "EEPROM_ERROR";
582 case IPW_FW_ERROR_SYSASSERT:
583 return "SYSASSERT";
584 case IPW_FW_ERROR_FATAL_ERROR:
585 return "FATAL_ERROR";
586 default:
587 return "UNKNOWN_ERROR";
588 }
589 }
590
591 static void ipw_dump_error_log(struct ipw_priv *priv,
592 struct ipw_fw_error *error)
593 {
594 u32 i;
595
596 if (!error) {
597 IPW_ERROR("Error allocating and capturing error log. "
598 "Nothing to dump.\n");
599 return;
600 }
601
602 IPW_ERROR("Start IPW Error Log Dump:\n");
603 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
604 error->status, error->config);
605
606 for (i = 0; i < error->elem_len; i++)
607 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
608 ipw_error_desc(error->elem[i].desc),
609 error->elem[i].time,
610 error->elem[i].blink1,
611 error->elem[i].blink2,
612 error->elem[i].link1,
613 error->elem[i].link2, error->elem[i].data);
614 for (i = 0; i < error->log_len; i++)
615 IPW_ERROR("%i\t0x%08x\t%i\n",
616 error->log[i].time,
617 error->log[i].data, error->log[i].event);
618 }
619 #endif
620
621 static inline int ipw_is_init(struct ipw_priv *priv)
622 {
623 return (priv->status & STATUS_INIT) ? 1 : 0;
624 }
625
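/* Read an ordinal (a value exported by the firmware through one of three
 * tables) into 'val'. On entry *len is the size of 'val'; on success it is
 * set to the length of the data actually read. */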
626 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
627 {
628 u32 addr, field_info, field_len, field_count, total_len;
629
630 IPW_DEBUG_ORD("ordinal = %i\n", ord);
631
632 if (!priv || !val || !len) {
633 IPW_DEBUG_ORD("Invalid argument\n");
634 return -EINVAL;
635 }
636
637 /* verify device ordinal tables have been initialized */
638 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
639 IPW_DEBUG_ORD("Access ordinals before initialization\n");
640 return -EINVAL;
641 }
642
643 switch (IPW_ORD_TABLE_ID_MASK & ord) {
644 case IPW_ORD_TABLE_0_MASK:
645 /*
646 * TABLE 0: Direct access to a table of 32 bit values
647 *
648 * This is a very simple table with the data directly
649 * read from the table
650 */
651
652 /* remove the table id from the ordinal */
653 ord &= IPW_ORD_TABLE_VALUE_MASK;
654
655 /* boundary check */
656 if (ord > priv->table0_len) {
657 			IPW_DEBUG_ORD("ordinal value (%i) longer than "
658 "max (%i)\n", ord, priv->table0_len);
659 return -EINVAL;
660 }
661
662 /* verify we have enough room to store the value */
663 if (*len < sizeof(u32)) {
664 IPW_DEBUG_ORD("ordinal buffer length too small, "
665 "need %zd\n", sizeof(u32));
666 return -EINVAL;
667 }
668
669 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
670 ord, priv->table0_addr + (ord << 2));
671
672 *len = sizeof(u32);
673 ord <<= 2;
674 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
675 break;
676
677 case IPW_ORD_TABLE_1_MASK:
678 /*
679 * TABLE 1: Indirect access to a table of 32 bit values
680 *
681 * This is a fairly large table of u32 values each
682 * representing starting addr for the data (which is
683 * also a u32)
684 */
685
686 /* remove the table id from the ordinal */
687 ord &= IPW_ORD_TABLE_VALUE_MASK;
688
689 /* boundary check */
690 if (ord > priv->table1_len) {
691 IPW_DEBUG_ORD("ordinal value too long\n");
692 return -EINVAL;
693 }
694
695 /* verify we have enough room to store the value */
696 if (*len < sizeof(u32)) {
697 IPW_DEBUG_ORD("ordinal buffer length too small, "
698 "need %zd\n", sizeof(u32));
699 return -EINVAL;
700 }
701
702 *((u32 *) val) =
703 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
704 *len = sizeof(u32);
705 break;
706
707 case IPW_ORD_TABLE_2_MASK:
708 /*
709 * TABLE 2: Indirect access to a table of variable sized values
710 *
711 		 * This table consists of six values, each containing
712 		 * - dword containing the starting offset of the data
713 		 * - dword containing the length in the first 16 bits
714 		 *   and the count in the second 16 bits
715 */
716
717 /* remove the table id from the ordinal */
718 ord &= IPW_ORD_TABLE_VALUE_MASK;
719
720 /* boundary check */
721 if (ord > priv->table2_len) {
722 IPW_DEBUG_ORD("ordinal value too long\n");
723 return -EINVAL;
724 }
725
726 /* get the address of statistic */
727 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
728
729 		/* get the second DW of statistics;
730 		 * two 16-bit words - first is length, second is count */
731 field_info =
732 ipw_read_reg32(priv,
733 priv->table2_addr + (ord << 3) +
734 sizeof(u32));
735
736 /* get each entry length */
737 field_len = *((u16 *) & field_info);
738
739 /* get number of entries */
740 field_count = *(((u16 *) & field_info) + 1);
741
742 		/* abort if not enough memory */
743 total_len = field_len * field_count;
744 if (total_len > *len) {
745 *len = total_len;
746 return -EINVAL;
747 }
748
749 *len = total_len;
750 if (!total_len)
751 return 0;
752
753 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
754 "field_info = 0x%08x\n",
755 addr, total_len, field_info);
756 ipw_read_indirect(priv, addr, val, total_len);
757 break;
758
759 default:
760 IPW_DEBUG_ORD("Invalid ordinal!\n");
761 return -EINVAL;
762
763 }
764
765 return 0;
766 }
767
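/* Cache the SRAM addresses and lengths of the three ordinal tables so that
 * ipw_get_ordinal() can look values up later. */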
768 static void ipw_init_ordinals(struct ipw_priv *priv)
769 {
770 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
771 priv->table0_len = ipw_read32(priv, priv->table0_addr);
772
773 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
774 priv->table0_addr, priv->table0_len);
775
776 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
777 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
778
779 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
780 priv->table1_addr, priv->table1_len);
781
782 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
783 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
784 priv->table2_len &= 0x0000ffff; /* use first two bytes */
785
786 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
787 priv->table2_addr, priv->table2_len);
788
789 }
790
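/* Clear the start-standby and DMA gate bits of an IPW_EVENT_REG value
 * before it is written back (used by the LED routines below). */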
791 static u32 ipw_register_toggle(u32 reg)
792 {
793 reg &= ~IPW_START_STANDBY;
794 if (reg & IPW_GATE_ODMA)
795 reg &= ~IPW_GATE_ODMA;
796 if (reg & IPW_GATE_IDMA)
797 reg &= ~IPW_GATE_IDMA;
798 if (reg & IPW_GATE_ADMA)
799 reg &= ~IPW_GATE_ADMA;
800 return reg;
801 }
802
803 /*
804 * LED behavior:
805  *  - On radio ON, turn on any LEDs that need to be on during start
806 * - On initialization, start unassociated blink
807 * - On association, disable unassociated blink
808 * - On disassociation, start unassociated blink
809 * - On radio OFF, turn off any LEDs started during radio on
810 *
811 */
812 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
813 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
814 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
815
816 static void ipw_led_link_on(struct ipw_priv *priv)
817 {
818 unsigned long flags;
819 u32 led;
820
821 /* If configured to not use LEDs, or nic_type is 1,
822 * then we don't toggle a LINK led */
823 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
824 return;
825
826 spin_lock_irqsave(&priv->lock, flags);
827
828 if (!(priv->status & STATUS_RF_KILL_MASK) &&
829 !(priv->status & STATUS_LED_LINK_ON)) {
830 IPW_DEBUG_LED("Link LED On\n");
831 led = ipw_read_reg32(priv, IPW_EVENT_REG);
832 led |= priv->led_association_on;
833
834 led = ipw_register_toggle(led);
835
836 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
837 ipw_write_reg32(priv, IPW_EVENT_REG, led);
838
839 priv->status |= STATUS_LED_LINK_ON;
840
841 /* If we aren't associated, schedule turning the LED off */
842 if (!(priv->status & STATUS_ASSOCIATED))
843 queue_delayed_work(priv->workqueue,
844 &priv->led_link_off,
845 LD_TIME_LINK_ON);
846 }
847
848 spin_unlock_irqrestore(&priv->lock, flags);
849 }
850
851 static void ipw_bg_led_link_on(void *data)
852 {
853 struct ipw_priv *priv = data;
854 mutex_lock(&priv->mutex);
855 ipw_led_link_on(data);
856 mutex_unlock(&priv->mutex);
857 }
858
859 static void ipw_led_link_off(struct ipw_priv *priv)
860 {
861 unsigned long flags;
862 u32 led;
863
864 /* If configured not to use LEDs, or nic type is 1,
865 	 * then we don't toggle the LINK led. */
866 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
867 return;
868
869 spin_lock_irqsave(&priv->lock, flags);
870
871 if (priv->status & STATUS_LED_LINK_ON) {
872 led = ipw_read_reg32(priv, IPW_EVENT_REG);
873 led &= priv->led_association_off;
874 led = ipw_register_toggle(led);
875
876 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
877 ipw_write_reg32(priv, IPW_EVENT_REG, led);
878
879 IPW_DEBUG_LED("Link LED Off\n");
880
881 priv->status &= ~STATUS_LED_LINK_ON;
882
883 /* If we aren't associated and the radio is on, schedule
884 * turning the LED on (blink while unassociated) */
885 if (!(priv->status & STATUS_RF_KILL_MASK) &&
886 !(priv->status & STATUS_ASSOCIATED))
887 queue_delayed_work(priv->workqueue, &priv->led_link_on,
888 LD_TIME_LINK_OFF);
889
890 }
891
892 spin_unlock_irqrestore(&priv->lock, flags);
893 }
894
895 static void ipw_bg_led_link_off(void *data)
896 {
897 struct ipw_priv *priv = data;
898 mutex_lock(&priv->mutex);
899 ipw_led_link_off(data);
900 mutex_unlock(&priv->mutex);
901 }
902
903 static void __ipw_led_activity_on(struct ipw_priv *priv)
904 {
905 u32 led;
906
907 if (priv->config & CFG_NO_LED)
908 return;
909
910 if (priv->status & STATUS_RF_KILL_MASK)
911 return;
912
913 if (!(priv->status & STATUS_LED_ACT_ON)) {
914 led = ipw_read_reg32(priv, IPW_EVENT_REG);
915 led |= priv->led_activity_on;
916
917 led = ipw_register_toggle(led);
918
919 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
920 ipw_write_reg32(priv, IPW_EVENT_REG, led);
921
922 IPW_DEBUG_LED("Activity LED On\n");
923
924 priv->status |= STATUS_LED_ACT_ON;
925
926 cancel_delayed_work(&priv->led_act_off);
927 queue_delayed_work(priv->workqueue, &priv->led_act_off,
928 LD_TIME_ACT_ON);
929 } else {
930 /* Reschedule LED off for full time period */
931 cancel_delayed_work(&priv->led_act_off);
932 queue_delayed_work(priv->workqueue, &priv->led_act_off,
933 LD_TIME_ACT_ON);
934 }
935 }
936
937 #if 0
938 void ipw_led_activity_on(struct ipw_priv *priv)
939 {
940 unsigned long flags;
941 spin_lock_irqsave(&priv->lock, flags);
942 __ipw_led_activity_on(priv);
943 spin_unlock_irqrestore(&priv->lock, flags);
944 }
945 #endif /* 0 */
946
947 static void ipw_led_activity_off(struct ipw_priv *priv)
948 {
949 unsigned long flags;
950 u32 led;
951
952 if (priv->config & CFG_NO_LED)
953 return;
954
955 spin_lock_irqsave(&priv->lock, flags);
956
957 if (priv->status & STATUS_LED_ACT_ON) {
958 led = ipw_read_reg32(priv, IPW_EVENT_REG);
959 led &= priv->led_activity_off;
960
961 led = ipw_register_toggle(led);
962
963 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
964 ipw_write_reg32(priv, IPW_EVENT_REG, led);
965
966 IPW_DEBUG_LED("Activity LED Off\n");
967
968 priv->status &= ~STATUS_LED_ACT_ON;
969 }
970
971 spin_unlock_irqrestore(&priv->lock, flags);
972 }
973
974 static void ipw_bg_led_activity_off(void *data)
975 {
976 struct ipw_priv *priv = data;
977 mutex_lock(&priv->mutex);
978 ipw_led_activity_off(data);
979 mutex_unlock(&priv->mutex);
980 }
981
982 static void ipw_led_band_on(struct ipw_priv *priv)
983 {
984 unsigned long flags;
985 u32 led;
986
987 /* Only nic type 1 supports mode LEDs */
988 if (priv->config & CFG_NO_LED ||
989 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
990 return;
991
992 spin_lock_irqsave(&priv->lock, flags);
993
994 led = ipw_read_reg32(priv, IPW_EVENT_REG);
995 if (priv->assoc_network->mode == IEEE_A) {
996 led |= priv->led_ofdm_on;
997 led &= priv->led_association_off;
998 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
999 } else if (priv->assoc_network->mode == IEEE_G) {
1000 led |= priv->led_ofdm_on;
1001 led |= priv->led_association_on;
1002 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1003 } else {
1004 led &= priv->led_ofdm_off;
1005 led |= priv->led_association_on;
1006 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1007 }
1008
1009 led = ipw_register_toggle(led);
1010
1011 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1012 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1013
1014 spin_unlock_irqrestore(&priv->lock, flags);
1015 }
1016
1017 static void ipw_led_band_off(struct ipw_priv *priv)
1018 {
1019 unsigned long flags;
1020 u32 led;
1021
1022 /* Only nic type 1 supports mode LEDs */
1023 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1024 return;
1025
1026 spin_lock_irqsave(&priv->lock, flags);
1027
1028 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1029 led &= priv->led_ofdm_off;
1030 led &= priv->led_association_off;
1031
1032 led = ipw_register_toggle(led);
1033
1034 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1035 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1036
1037 spin_unlock_irqrestore(&priv->lock, flags);
1038 }
1039
1040 static void ipw_led_radio_on(struct ipw_priv *priv)
1041 {
1042 ipw_led_link_on(priv);
1043 }
1044
1045 static void ipw_led_radio_off(struct ipw_priv *priv)
1046 {
1047 ipw_led_activity_off(priv);
1048 ipw_led_link_off(priv);
1049 }
1050
1051 static void ipw_led_link_up(struct ipw_priv *priv)
1052 {
1053 /* Set the Link Led on for all nic types */
1054 ipw_led_link_on(priv);
1055 }
1056
1057 static void ipw_led_link_down(struct ipw_priv *priv)
1058 {
1059 ipw_led_activity_off(priv);
1060 ipw_led_link_off(priv);
1061
1062 if (priv->status & STATUS_RF_KILL_MASK)
1063 ipw_led_radio_off(priv);
1064 }
1065
1066 static void ipw_led_init(struct ipw_priv *priv)
1067 {
1068 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1069
1070 /* Set the default PINs for the link and activity leds */
1071 priv->led_activity_on = IPW_ACTIVITY_LED;
1072 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1073
1074 priv->led_association_on = IPW_ASSOCIATED_LED;
1075 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1076
1077 /* Set the default PINs for the OFDM leds */
1078 priv->led_ofdm_on = IPW_OFDM_LED;
1079 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1080
1081 switch (priv->nic_type) {
1082 case EEPROM_NIC_TYPE_1:
1083 /* In this NIC type, the LEDs are reversed.... */
1084 priv->led_activity_on = IPW_ASSOCIATED_LED;
1085 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1086 priv->led_association_on = IPW_ACTIVITY_LED;
1087 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1088
1089 if (!(priv->config & CFG_NO_LED))
1090 ipw_led_band_on(priv);
1091
1092 /* And we don't blink link LEDs for this nic, so
1093 * just return here */
1094 return;
1095
1096 case EEPROM_NIC_TYPE_3:
1097 case EEPROM_NIC_TYPE_2:
1098 case EEPROM_NIC_TYPE_4:
1099 case EEPROM_NIC_TYPE_0:
1100 break;
1101
1102 default:
1103 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1104 priv->nic_type);
1105 priv->nic_type = EEPROM_NIC_TYPE_0;
1106 break;
1107 }
1108
1109 if (!(priv->config & CFG_NO_LED)) {
1110 if (priv->status & STATUS_ASSOCIATED)
1111 ipw_led_link_on(priv);
1112 else
1113 ipw_led_link_off(priv);
1114 }
1115 }
1116
1117 static void ipw_led_shutdown(struct ipw_priv *priv)
1118 {
1119 ipw_led_activity_off(priv);
1120 ipw_led_link_off(priv);
1121 ipw_led_band_off(priv);
1122 cancel_delayed_work(&priv->led_link_on);
1123 cancel_delayed_work(&priv->led_link_off);
1124 cancel_delayed_work(&priv->led_act_off);
1125 }
1126
1127 /*
1128 * The following adds a new attribute to the sysfs representation
1129 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1130  * used for controlling the debug level.
1131 *
1132 * See the level definitions in ipw for details.
1133 */
1134 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1135 {
1136 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1137 }
1138
1139 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1140 size_t count)
1141 {
1142 char *p = (char *)buf;
1143 u32 val;
1144
1145 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1146 p++;
1147 if (p[0] == 'x' || p[0] == 'X')
1148 p++;
1149 val = simple_strtoul(p, &p, 16);
1150 } else
1151 val = simple_strtoul(p, &p, 10);
1152 if (p == buf)
1153 printk(KERN_INFO DRV_NAME
1154 ": %s is not in hex or decimal form.\n", buf);
1155 else
1156 ipw_debug_level = val;
1157
1158 return strnlen(buf, count);
1159 }
1160
1161 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1162 show_debug_level, store_debug_level);
1163
1164 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1165 {
1166 /* length = 1st dword in log */
1167 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1168 }
1169
1170 static void ipw_capture_event_log(struct ipw_priv *priv,
1171 u32 log_len, struct ipw_event *log)
1172 {
1173 u32 base;
1174
1175 if (log_len) {
1176 base = ipw_read32(priv, IPW_EVENT_LOG);
1177 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1178 (u8 *) log, sizeof(*log) * log_len);
1179 }
1180 }
1181
1182 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1183 {
1184 struct ipw_fw_error *error;
1185 u32 log_len = ipw_get_event_log_len(priv);
1186 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1187 u32 elem_len = ipw_read_reg32(priv, base);
1188
1189 error = kmalloc(sizeof(*error) +
1190 sizeof(*error->elem) * elem_len +
1191 sizeof(*error->log) * log_len, GFP_ATOMIC);
1192 if (!error) {
1193 IPW_ERROR("Memory allocation for firmware error log "
1194 "failed.\n");
1195 return NULL;
1196 }
1197 error->jiffies = jiffies;
1198 error->status = priv->status;
1199 error->config = priv->config;
1200 error->elem_len = elem_len;
1201 error->log_len = log_len;
1202 error->elem = (struct ipw_error_elem *)error->payload;
1203 error->log = (struct ipw_event *)(error->elem + elem_len);
1204
1205 ipw_capture_event_log(priv, log_len, error->log);
1206
1207 if (elem_len)
1208 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1209 sizeof(*error->elem) * elem_len);
1210
1211 return error;
1212 }
1213
1214 static void ipw_free_error_log(struct ipw_fw_error *error)
1215 {
1216 if (error)
1217 kfree(error);
1218 }
1219
1220 static ssize_t show_event_log(struct device *d,
1221 struct device_attribute *attr, char *buf)
1222 {
1223 struct ipw_priv *priv = dev_get_drvdata(d);
1224 u32 log_len = ipw_get_event_log_len(priv);
1225 struct ipw_event log[log_len];
1226 u32 len = 0, i;
1227
1228 ipw_capture_event_log(priv, log_len, log);
1229
1230 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1231 for (i = 0; i < log_len; i++)
1232 len += snprintf(buf + len, PAGE_SIZE - len,
1233 "\n%08X%08X%08X",
1234 log[i].time, log[i].event, log[i].data);
1235 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1236 return len;
1237 }
1238
1239 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1240
1241 static ssize_t show_error(struct device *d,
1242 struct device_attribute *attr, char *buf)
1243 {
1244 struct ipw_priv *priv = dev_get_drvdata(d);
1245 u32 len = 0, i;
1246 if (!priv->error)
1247 return 0;
1248 len += snprintf(buf + len, PAGE_SIZE - len,
1249 "%08lX%08X%08X%08X",
1250 priv->error->jiffies,
1251 priv->error->status,
1252 priv->error->config, priv->error->elem_len);
1253 for (i = 0; i < priv->error->elem_len; i++)
1254 len += snprintf(buf + len, PAGE_SIZE - len,
1255 "\n%08X%08X%08X%08X%08X%08X%08X",
1256 priv->error->elem[i].time,
1257 priv->error->elem[i].desc,
1258 priv->error->elem[i].blink1,
1259 priv->error->elem[i].blink2,
1260 priv->error->elem[i].link1,
1261 priv->error->elem[i].link2,
1262 priv->error->elem[i].data);
1263
1264 len += snprintf(buf + len, PAGE_SIZE - len,
1265 "\n%08X", priv->error->log_len);
1266 for (i = 0; i < priv->error->log_len; i++)
1267 len += snprintf(buf + len, PAGE_SIZE - len,
1268 "\n%08X%08X%08X",
1269 priv->error->log[i].time,
1270 priv->error->log[i].event,
1271 priv->error->log[i].data);
1272 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1273 return len;
1274 }
1275
1276 static ssize_t clear_error(struct device *d,
1277 struct device_attribute *attr,
1278 const char *buf, size_t count)
1279 {
1280 struct ipw_priv *priv = dev_get_drvdata(d);
1281 if (priv->error) {
1282 ipw_free_error_log(priv->error);
1283 priv->error = NULL;
1284 }
1285 return count;
1286 }
1287
1288 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1289
1290 static ssize_t show_cmd_log(struct device *d,
1291 struct device_attribute *attr, char *buf)
1292 {
1293 struct ipw_priv *priv = dev_get_drvdata(d);
1294 u32 len = 0, i;
1295 if (!priv->cmdlog)
1296 return 0;
1297 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1298 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1299 i = (i + 1) % priv->cmdlog_len) {
1300 len +=
1301 snprintf(buf + len, PAGE_SIZE - len,
1302 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1303 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1304 priv->cmdlog[i].cmd.len);
1305 len +=
1306 snprintk_buf(buf + len, PAGE_SIZE - len,
1307 (u8 *) priv->cmdlog[i].cmd.param,
1308 priv->cmdlog[i].cmd.len);
1309 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1310 }
1311 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1312 return len;
1313 }
1314
1315 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1316
1317 #ifdef CONFIG_IPW2200_PROMISCUOUS
1318 static void ipw_prom_free(struct ipw_priv *priv);
1319 static int ipw_prom_alloc(struct ipw_priv *priv);
1320 static ssize_t store_rtap_iface(struct device *d,
1321 struct device_attribute *attr,
1322 const char *buf, size_t count)
1323 {
1324 struct ipw_priv *priv = dev_get_drvdata(d);
1325 int rc = 0;
1326
1327 if (count < 1)
1328 return -EINVAL;
1329
1330 switch (buf[0]) {
1331 case '0':
1332 if (!rtap_iface)
1333 return count;
1334
1335 if (netif_running(priv->prom_net_dev)) {
1336 IPW_WARNING("Interface is up. Cannot unregister.\n");
1337 return count;
1338 }
1339
1340 ipw_prom_free(priv);
1341 rtap_iface = 0;
1342 break;
1343
1344 case '1':
1345 if (rtap_iface)
1346 return count;
1347
1348 rc = ipw_prom_alloc(priv);
1349 if (!rc)
1350 rtap_iface = 1;
1351 break;
1352
1353 default:
1354 return -EINVAL;
1355 }
1356
1357 if (rc) {
1358 IPW_ERROR("Failed to register promiscuous network "
1359 "device (error %d).\n", rc);
1360 }
1361
1362 return count;
1363 }
1364
1365 static ssize_t show_rtap_iface(struct device *d,
1366 struct device_attribute *attr,
1367 char *buf)
1368 {
1369 struct ipw_priv *priv = dev_get_drvdata(d);
1370 if (rtap_iface)
1371 return sprintf(buf, "%s", priv->prom_net_dev->name);
1372 else {
1373 buf[0] = '-';
1374 buf[1] = '1';
1375 buf[2] = '\0';
1376 return 3;
1377 }
1378 }
1379
1380 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1381 store_rtap_iface);
1382
1383 static ssize_t store_rtap_filter(struct device *d,
1384 struct device_attribute *attr,
1385 const char *buf, size_t count)
1386 {
1387 struct ipw_priv *priv = dev_get_drvdata(d);
1388
1389 if (!priv->prom_priv) {
1390 IPW_ERROR("Attempting to set filter without "
1391 "rtap_iface enabled.\n");
1392 return -EPERM;
1393 }
1394
1395 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1396
1397 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1398 BIT_ARG16(priv->prom_priv->filter));
1399
1400 return count;
1401 }
1402
1403 static ssize_t show_rtap_filter(struct device *d,
1404 struct device_attribute *attr,
1405 char *buf)
1406 {
1407 struct ipw_priv *priv = dev_get_drvdata(d);
1408 return sprintf(buf, "0x%04X",
1409 priv->prom_priv ? priv->prom_priv->filter : 0);
1410 }
1411
1412 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1413 store_rtap_filter);
1414 #endif
1415
1416 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1417 char *buf)
1418 {
1419 struct ipw_priv *priv = dev_get_drvdata(d);
1420 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1421 }
1422
1423 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1424 const char *buf, size_t count)
1425 {
1426 struct ipw_priv *priv = dev_get_drvdata(d);
1427 #ifdef CONFIG_IPW2200_DEBUG
1428 struct net_device *dev = priv->net_dev;
1429 #endif
1430 char buffer[] = "00000000";
1431 unsigned long len =
1432 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1433 unsigned long val;
1434 char *p = buffer;
1435
1436 IPW_DEBUG_INFO("enter\n");
1437
1438 strncpy(buffer, buf, len);
1439 buffer[len] = 0;
1440
1441 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1442 p++;
1443 if (p[0] == 'x' || p[0] == 'X')
1444 p++;
1445 val = simple_strtoul(p, &p, 16);
1446 } else
1447 val = simple_strtoul(p, &p, 10);
1448 if (p == buffer) {
1449 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1450 } else {
1451 priv->ieee->scan_age = val;
1452 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1453 }
1454
1455 IPW_DEBUG_INFO("exit\n");
1456 return len;
1457 }
1458
1459 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1460
1461 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1462 char *buf)
1463 {
1464 struct ipw_priv *priv = dev_get_drvdata(d);
1465 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1466 }
1467
1468 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1469 const char *buf, size_t count)
1470 {
1471 struct ipw_priv *priv = dev_get_drvdata(d);
1472
1473 IPW_DEBUG_INFO("enter\n");
1474
1475 if (count == 0)
1476 return 0;
1477
1478 if (*buf == 0) {
1479 IPW_DEBUG_LED("Disabling LED control.\n");
1480 priv->config |= CFG_NO_LED;
1481 ipw_led_shutdown(priv);
1482 } else {
1483 IPW_DEBUG_LED("Enabling LED control.\n");
1484 priv->config &= ~CFG_NO_LED;
1485 ipw_led_init(priv);
1486 }
1487
1488 IPW_DEBUG_INFO("exit\n");
1489 return count;
1490 }
1491
1492 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1493
1494 static ssize_t show_status(struct device *d,
1495 struct device_attribute *attr, char *buf)
1496 {
1497 struct ipw_priv *p = d->driver_data;
1498 return sprintf(buf, "0x%08x\n", (int)p->status);
1499 }
1500
1501 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1502
1503 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1504 char *buf)
1505 {
1506 struct ipw_priv *p = d->driver_data;
1507 return sprintf(buf, "0x%08x\n", (int)p->config);
1508 }
1509
1510 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1511
1512 static ssize_t show_nic_type(struct device *d,
1513 struct device_attribute *attr, char *buf)
1514 {
1515 struct ipw_priv *priv = d->driver_data;
1516 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1517 }
1518
1519 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1520
1521 static ssize_t show_ucode_version(struct device *d,
1522 struct device_attribute *attr, char *buf)
1523 {
1524 u32 len = sizeof(u32), tmp = 0;
1525 struct ipw_priv *p = d->driver_data;
1526
1527 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1528 return 0;
1529
1530 return sprintf(buf, "0x%08x\n", tmp);
1531 }
1532
1533 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1534
1535 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1536 char *buf)
1537 {
1538 u32 len = sizeof(u32), tmp = 0;
1539 struct ipw_priv *p = d->driver_data;
1540
1541 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1542 return 0;
1543
1544 return sprintf(buf, "0x%08x\n", tmp);
1545 }
1546
1547 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1548
1549 /*
1550 * Add a device attribute to view/control the delay between eeprom
1551 * operations.
1552 */
1553 static ssize_t show_eeprom_delay(struct device *d,
1554 struct device_attribute *attr, char *buf)
1555 {
1556 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1557 return sprintf(buf, "%i\n", n);
1558 }
1559 static ssize_t store_eeprom_delay(struct device *d,
1560 struct device_attribute *attr,
1561 const char *buf, size_t count)
1562 {
1563 struct ipw_priv *p = d->driver_data;
1564 sscanf(buf, "%i", &p->eeprom_delay);
1565 return strnlen(buf, count);
1566 }
1567
1568 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1569 show_eeprom_delay, store_eeprom_delay);
1570
1571 static ssize_t show_command_event_reg(struct device *d,
1572 struct device_attribute *attr, char *buf)
1573 {
1574 u32 reg = 0;
1575 struct ipw_priv *p = d->driver_data;
1576
1577 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1578 return sprintf(buf, "0x%08x\n", reg);
1579 }
1580 static ssize_t store_command_event_reg(struct device *d,
1581 struct device_attribute *attr,
1582 const char *buf, size_t count)
1583 {
1584 u32 reg;
1585 struct ipw_priv *p = d->driver_data;
1586
1587 sscanf(buf, "%x", &reg);
1588 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1589 return strnlen(buf, count);
1590 }
1591
1592 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1593 show_command_event_reg, store_command_event_reg);
1594
1595 static ssize_t show_mem_gpio_reg(struct device *d,
1596 struct device_attribute *attr, char *buf)
1597 {
1598 u32 reg = 0;
1599 struct ipw_priv *p = d->driver_data;
1600
1601 reg = ipw_read_reg32(p, 0x301100);
1602 return sprintf(buf, "0x%08x\n", reg);
1603 }
1604 static ssize_t store_mem_gpio_reg(struct device *d,
1605 struct device_attribute *attr,
1606 const char *buf, size_t count)
1607 {
1608 u32 reg;
1609 struct ipw_priv *p = d->driver_data;
1610
1611 sscanf(buf, "%x", &reg);
1612 ipw_write_reg32(p, 0x301100, reg);
1613 return strnlen(buf, count);
1614 }
1615
1616 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1617 show_mem_gpio_reg, store_mem_gpio_reg);
1618
1619 static ssize_t show_indirect_dword(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621 {
1622 u32 reg = 0;
1623 struct ipw_priv *priv = d->driver_data;
1624
1625 if (priv->status & STATUS_INDIRECT_DWORD)
1626 reg = ipw_read_reg32(priv, priv->indirect_dword);
1627 else
1628 reg = 0;
1629
1630 return sprintf(buf, "0x%08x\n", reg);
1631 }
1632 static ssize_t store_indirect_dword(struct device *d,
1633 struct device_attribute *attr,
1634 const char *buf, size_t count)
1635 {
1636 struct ipw_priv *priv = d->driver_data;
1637
1638 sscanf(buf, "%x", &priv->indirect_dword);
1639 priv->status |= STATUS_INDIRECT_DWORD;
1640 return strnlen(buf, count);
1641 }
1642
1643 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1644 show_indirect_dword, store_indirect_dword);
1645
1646 static ssize_t show_indirect_byte(struct device *d,
1647 struct device_attribute *attr, char *buf)
1648 {
1649 u8 reg = 0;
1650 struct ipw_priv *priv = d->driver_data;
1651
1652 if (priv->status & STATUS_INDIRECT_BYTE)
1653 reg = ipw_read_reg8(priv, priv->indirect_byte);
1654 else
1655 reg = 0;
1656
1657 return sprintf(buf, "0x%02x\n", reg);
1658 }
1659 static ssize_t store_indirect_byte(struct device *d,
1660 struct device_attribute *attr,
1661 const char *buf, size_t count)
1662 {
1663 struct ipw_priv *priv = d->driver_data;
1664
1665 sscanf(buf, "%x", &priv->indirect_byte);
1666 priv->status |= STATUS_INDIRECT_BYTE;
1667 return strnlen(buf, count);
1668 }
1669
1670 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1671 show_indirect_byte, store_indirect_byte);
1672
1673 static ssize_t show_direct_dword(struct device *d,
1674 struct device_attribute *attr, char *buf)
1675 {
1676 u32 reg = 0;
1677 struct ipw_priv *priv = d->driver_data;
1678
1679 if (priv->status & STATUS_DIRECT_DWORD)
1680 reg = ipw_read32(priv, priv->direct_dword);
1681 else
1682 reg = 0;
1683
1684 return sprintf(buf, "0x%08x\n", reg);
1685 }
1686 static ssize_t store_direct_dword(struct device *d,
1687 struct device_attribute *attr,
1688 const char *buf, size_t count)
1689 {
1690 struct ipw_priv *priv = d->driver_data;
1691
1692 sscanf(buf, "%x", &priv->direct_dword);
1693 priv->status |= STATUS_DIRECT_DWORD;
1694 return strnlen(buf, count);
1695 }
1696
1697 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1698 show_direct_dword, store_direct_dword);
1699
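/* Sample the hardware RF-kill switch (bit 0x10000 of register 0x30), update
 * STATUS_RF_KILL_HW accordingly and return 1 if the switch is active. */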
1700 static int rf_kill_active(struct ipw_priv *priv)
1701 {
1702 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1703 priv->status |= STATUS_RF_KILL_HW;
1704 else
1705 priv->status &= ~STATUS_RF_KILL_HW;
1706
1707 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1708 }
1709
1710 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1711 char *buf)
1712 {
1713 /* 0 - RF kill not enabled
1714 1 - SW based RF kill active (sysfs)
1715 2 - HW based RF kill active
1716 	   3 - Both HW and SW based RF kill active */
1717 struct ipw_priv *priv = d->driver_data;
1718 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1719 (rf_kill_active(priv) ? 0x2 : 0x0);
1720 return sprintf(buf, "%i\n", val);
1721 }
1722
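/* Set or clear the software RF-kill state. Returns 0 if the requested state
 * was already in effect, 1 after updating it and scheduling the matching
 * up/down (or RF-kill poll) work. */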
1723 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1724 {
1725 if ((disable_radio ? 1 : 0) ==
1726 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1727 return 0;
1728
1729 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1730 disable_radio ? "OFF" : "ON");
1731
1732 if (disable_radio) {
1733 priv->status |= STATUS_RF_KILL_SW;
1734
1735 if (priv->workqueue)
1736 cancel_delayed_work(&priv->request_scan);
1737 queue_work(priv->workqueue, &priv->down);
1738 } else {
1739 priv->status &= ~STATUS_RF_KILL_SW;
1740 if (rf_kill_active(priv)) {
1741 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1742 "disabled by HW switch\n");
1743 /* Make sure the RF_KILL check timer is running */
1744 cancel_delayed_work(&priv->rf_kill);
1745 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1746 2 * HZ);
1747 } else
1748 queue_work(priv->workqueue, &priv->up);
1749 }
1750
1751 return 1;
1752 }
1753
1754 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1755 const char *buf, size_t count)
1756 {
1757 struct ipw_priv *priv = d->driver_data;
1758
1759 ipw_radio_kill_sw(priv, buf[0] == '1');
1760
1761 return count;
1762 }
1763
1764 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1765
1766 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1767 char *buf)
1768 {
1769 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1770 int pos = 0, len = 0;
1771 if (priv->config & CFG_SPEED_SCAN) {
1772 while (priv->speed_scan[pos] != 0)
1773 len += sprintf(&buf[len], "%d ",
1774 priv->speed_scan[pos++]);
1775 return len + sprintf(&buf[len], "\n");
1776 }
1777
1778 return sprintf(buf, "0\n");
1779 }
1780
1781 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1782 const char *buf, size_t count)
1783 {
1784 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1785 int channel, pos = 0;
1786 const char *p = buf;
1787
1788 /* list of space separated channels to scan, optionally ending with 0 */
1789 while ((channel = simple_strtol(p, NULL, 0))) {
1790 if (pos == MAX_SPEED_SCAN - 1) {
1791 priv->speed_scan[pos] = 0;
1792 break;
1793 }
1794
1795 if (ieee80211_is_valid_channel(priv->ieee, channel))
1796 priv->speed_scan[pos++] = channel;
1797 else
1798 IPW_WARNING("Skipping invalid channel request: %d\n",
1799 channel);
1800 p = strchr(p, ' ');
1801 if (!p)
1802 break;
1803 while (*p == ' ' || *p == '\t')
1804 p++;
1805 }
1806
1807 if (pos == 0)
1808 priv->config &= ~CFG_SPEED_SCAN;
1809 else {
1810 priv->speed_scan_pos = 0;
1811 priv->config |= CFG_SPEED_SCAN;
1812 }
1813
1814 return count;
1815 }
1816
1817 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1818 store_speed_scan);
1819
1820 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1821 char *buf)
1822 {
1823 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1824 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1825 }
1826
1827 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1828 const char *buf, size_t count)
1829 {
1830 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1831 if (buf[0] == '1')
1832 priv->config |= CFG_NET_STATS;
1833 else
1834 priv->config &= ~CFG_NET_STATS;
1835
1836 return count;
1837 }
1838
1839 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1840 show_net_stats, store_net_stats);
1841
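/* Report the current association state to user space via a wireless
 * extensions SIOCGIWAP event (the BSSID when associated, a zeroed address
 * otherwise). */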
1842 static void notify_wx_assoc_event(struct ipw_priv *priv)
1843 {
1844 union iwreq_data wrqu;
1845 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1846 if (priv->status & STATUS_ASSOCIATED)
1847 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1848 else
1849 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1850 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1851 }
1852
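/* Interrupt bottom half: collect the pending INTA bits (including any cached
 * by the hard IRQ handler) and dispatch them to the RX, TX-reclaim, RF-kill
 * and firmware-error handlers. */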
1853 static void ipw_irq_tasklet(struct ipw_priv *priv)
1854 {
1855 u32 inta, inta_mask, handled = 0;
1856 unsigned long flags;
1857 int rc = 0;
1858
1859 spin_lock_irqsave(&priv->lock, flags);
1860
1861 inta = ipw_read32(priv, IPW_INTA_RW);
1862 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1863 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1864
1865 /* Add any cached INTA values that need to be handled */
1866 inta |= priv->isr_inta;
1867
1868 	/* handle each of the possible causes of the interrupt */
1869 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1870 ipw_rx(priv);
1871 handled |= IPW_INTA_BIT_RX_TRANSFER;
1872 }
1873
1874 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1875 IPW_DEBUG_HC("Command completed.\n");
1876 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1877 priv->status &= ~STATUS_HCMD_ACTIVE;
1878 wake_up_interruptible(&priv->wait_command_queue);
1879 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1880 }
1881
1882 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1883 IPW_DEBUG_TX("TX_QUEUE_1\n");
1884 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1885 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1886 }
1887
1888 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1889 IPW_DEBUG_TX("TX_QUEUE_2\n");
1890 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1891 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1892 }
1893
1894 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1895 IPW_DEBUG_TX("TX_QUEUE_3\n");
1896 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1897 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1898 }
1899
1900 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1901 IPW_DEBUG_TX("TX_QUEUE_4\n");
1902 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1903 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1904 }
1905
1906 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1907 IPW_WARNING("STATUS_CHANGE\n");
1908 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1909 }
1910
1911 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1912 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1913 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1914 }
1915
1916 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1917 IPW_WARNING("HOST_CMD_DONE\n");
1918 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1919 }
1920
1921 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1922 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1923 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1924 }
1925
1926 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1927 IPW_WARNING("PHY_OFF_DONE\n");
1928 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1929 }
1930
1931 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1932 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1933 priv->status |= STATUS_RF_KILL_HW;
1934 wake_up_interruptible(&priv->wait_command_queue);
1935 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1936 cancel_delayed_work(&priv->request_scan);
1937 schedule_work(&priv->link_down);
1938 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1939 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1940 }
1941
1942 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1943 IPW_WARNING("Firmware error detected. Restarting.\n");
1944 if (priv->error) {
1945 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1946 #ifdef CONFIG_IPW2200_DEBUG
1947 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1948 struct ipw_fw_error *error =
1949 ipw_alloc_error_log(priv);
1950 ipw_dump_error_log(priv, error);
1951 if (error)
1952 ipw_free_error_log(error);
1953 }
1954 #endif
1955 } else {
1956 priv->error = ipw_alloc_error_log(priv);
1957 if (priv->error)
1958 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1959 else
1960 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1961 "log.\n");
1962 #ifdef CONFIG_IPW2200_DEBUG
1963 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1964 ipw_dump_error_log(priv, priv->error);
1965 #endif
1966 }
1967
1968 		/* XXX: If hardware encryption is in use for WPA/WPA2,
1969 		 * we have to notify the supplicant. */
1970 if (priv->ieee->sec.encrypt) {
1971 priv->status &= ~STATUS_ASSOCIATED;
1972 notify_wx_assoc_event(priv);
1973 }
1974
1975 /* Keep the restart process from trying to send host
1976 * commands by clearing the INIT status bit */
1977 priv->status &= ~STATUS_INIT;
1978
1979 /* Cancel currently queued command. */
1980 priv->status &= ~STATUS_HCMD_ACTIVE;
1981 wake_up_interruptible(&priv->wait_command_queue);
1982
1983 queue_work(priv->workqueue, &priv->adapter_restart);
1984 handled |= IPW_INTA_BIT_FATAL_ERROR;
1985 }
1986
1987 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1988 IPW_ERROR("Parity error\n");
1989 handled |= IPW_INTA_BIT_PARITY_ERROR;
1990 }
1991
1992 if (handled != inta) {
1993 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1994 }
1995
1996 /* enable all interrupts */
1997 ipw_enable_interrupts(priv);
1998
1999 spin_unlock_irqrestore(&priv->lock, flags);
2000 }
2001
2002 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2003 static char *get_cmd_string(u8 cmd)
2004 {
2005 switch (cmd) {
2006 IPW_CMD(HOST_COMPLETE);
2007 IPW_CMD(POWER_DOWN);
2008 IPW_CMD(SYSTEM_CONFIG);
2009 IPW_CMD(MULTICAST_ADDRESS);
2010 IPW_CMD(SSID);
2011 IPW_CMD(ADAPTER_ADDRESS);
2012 IPW_CMD(PORT_TYPE);
2013 IPW_CMD(RTS_THRESHOLD);
2014 IPW_CMD(FRAG_THRESHOLD);
2015 IPW_CMD(POWER_MODE);
2016 IPW_CMD(WEP_KEY);
2017 IPW_CMD(TGI_TX_KEY);
2018 IPW_CMD(SCAN_REQUEST);
2019 IPW_CMD(SCAN_REQUEST_EXT);
2020 IPW_CMD(ASSOCIATE);
2021 IPW_CMD(SUPPORTED_RATES);
2022 IPW_CMD(SCAN_ABORT);
2023 IPW_CMD(TX_FLUSH);
2024 IPW_CMD(QOS_PARAMETERS);
2025 IPW_CMD(DINO_CONFIG);
2026 IPW_CMD(RSN_CAPABILITIES);
2027 IPW_CMD(RX_KEY);
2028 IPW_CMD(CARD_DISABLE);
2029 IPW_CMD(SEED_NUMBER);
2030 IPW_CMD(TX_POWER);
2031 IPW_CMD(COUNTRY_INFO);
2032 IPW_CMD(AIRONET_INFO);
2033 IPW_CMD(AP_TX_POWER);
2034 IPW_CMD(CCKM_INFO);
2035 IPW_CMD(CCX_VER_INFO);
2036 IPW_CMD(SET_CALIBRATION);
2037 IPW_CMD(SENSITIVITY_CALIB);
2038 IPW_CMD(RETRY_LIMIT);
2039 IPW_CMD(IPW_PRE_POWER_DOWN);
2040 IPW_CMD(VAP_BEACON_TEMPLATE);
2041 IPW_CMD(VAP_DTIM_PERIOD);
2042 IPW_CMD(EXT_SUPPORTED_RATES);
2043 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2044 IPW_CMD(VAP_QUIET_INTERVALS);
2045 IPW_CMD(VAP_CHANNEL_SWITCH);
2046 IPW_CMD(VAP_MANDATORY_CHANNELS);
2047 IPW_CMD(VAP_CELL_PWR_LIMIT);
2048 IPW_CMD(VAP_CF_PARAM_SET);
2049 IPW_CMD(VAP_SET_BEACONING_STATE);
2050 IPW_CMD(MEASUREMENT);
2051 IPW_CMD(POWER_CAPABILITY);
2052 IPW_CMD(SUPPORTED_CHANNELS);
2053 IPW_CMD(TPC_REPORT);
2054 IPW_CMD(WME_INFO);
2055 IPW_CMD(PRODUCTION_COMMAND);
2056 default:
2057 return "UNKNOWN";
2058 }
2059 }
2060
2061 #define HOST_COMPLETE_TIMEOUT HZ
2062
2063 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2064 {
2065 int rc = 0;
2066 unsigned long flags;
2067
2068 spin_lock_irqsave(&priv->lock, flags);
2069 if (priv->status & STATUS_HCMD_ACTIVE) {
2070 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2071 get_cmd_string(cmd->cmd));
2072 spin_unlock_irqrestore(&priv->lock, flags);
2073 return -EAGAIN;
2074 }
2075
2076 priv->status |= STATUS_HCMD_ACTIVE;
2077
2078 if (priv->cmdlog) {
2079 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2080 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2081 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2082 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2083 cmd->len);
2084 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2085 }
2086
2087 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2088 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2089 priv->status);
2090
2091 #ifndef DEBUG_CMD_WEP_KEY
2092 if (cmd->cmd == IPW_CMD_WEP_KEY)
2093 		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2094 else
2095 #endif
2096 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2097
2098 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2099 if (rc) {
2100 priv->status &= ~STATUS_HCMD_ACTIVE;
2101 IPW_ERROR("Failed to send %s: Reason %d\n",
2102 get_cmd_string(cmd->cmd), rc);
2103 spin_unlock_irqrestore(&priv->lock, flags);
2104 goto exit;
2105 }
2106 spin_unlock_irqrestore(&priv->lock, flags);
2107
2108 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2109 					      !(priv->status &
2110 						STATUS_HCMD_ACTIVE),
2111 					      HOST_COMPLETE_TIMEOUT);
2112 if (rc == 0) {
2113 spin_lock_irqsave(&priv->lock, flags);
2114 if (priv->status & STATUS_HCMD_ACTIVE) {
2115 IPW_ERROR("Failed to send %s: Command timed out.\n",
2116 get_cmd_string(cmd->cmd));
2117 priv->status &= ~STATUS_HCMD_ACTIVE;
2118 spin_unlock_irqrestore(&priv->lock, flags);
2119 rc = -EIO;
2120 goto exit;
2121 }
2122 spin_unlock_irqrestore(&priv->lock, flags);
2123 } else
2124 rc = 0;
2125
2126 if (priv->status & STATUS_RF_KILL_HW) {
2127 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2128 get_cmd_string(cmd->cmd));
2129 rc = -EIO;
2130 goto exit;
2131 }
2132
2133 exit:
2134 if (priv->cmdlog) {
2135 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2136 priv->cmdlog_pos %= priv->cmdlog_len;
2137 }
2138 return rc;
2139 }
2140
2141 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2142 {
2143 struct host_cmd cmd = {
2144 .cmd = command,
2145 };
2146
2147 return __ipw_send_cmd(priv, &cmd);
2148 }
2149
2150 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2151 void *data)
2152 {
2153 struct host_cmd cmd = {
2154 .cmd = command,
2155 .len = len,
2156 .param = data,
2157 };
2158
2159 return __ipw_send_cmd(priv, &cmd);
2160 }
2161
2162 static int ipw_send_host_complete(struct ipw_priv *priv)
2163 {
2164 if (!priv) {
2165 IPW_ERROR("Invalid args\n");
2166 return -1;
2167 }
2168
2169 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2170 }
2171
2172 static int ipw_send_system_config(struct ipw_priv *priv)
2173 {
2174 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2175 sizeof(priv->sys_config),
2176 &priv->sys_config);
2177 }
2178
2179 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2180 {
2181 if (!priv || !ssid) {
2182 IPW_ERROR("Invalid args\n");
2183 return -1;
2184 }
2185
2186 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2187 ssid);
2188 }
2189
2190 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2191 {
2192 if (!priv || !mac) {
2193 IPW_ERROR("Invalid args\n");
2194 return -1;
2195 }
2196
2197 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2198 priv->net_dev->name, MAC_ARG(mac));
2199
2200 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2201 }
2202
2203 /*
2204 * NOTE: This must be executed from our workqueue as it results in udelay
2205 * being called which may corrupt the keyboard if executed on default
2206 * workqueue
2207 */
2208 static void ipw_adapter_restart(void *adapter)
2209 {
2210 struct ipw_priv *priv = adapter;
2211
2212 if (priv->status & STATUS_RF_KILL_MASK)
2213 return;
2214
2215 ipw_down(priv);
2216
2217 if (priv->assoc_network &&
2218 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2219 ipw_remove_current_network(priv);
2220
2221 if (ipw_up(priv)) {
2222 IPW_ERROR("Failed to up device\n");
2223 return;
2224 }
2225 }
2226
2227 static void ipw_bg_adapter_restart(void *data)
2228 {
2229 struct ipw_priv *priv = data;
2230 mutex_lock(&priv->mutex);
2231 ipw_adapter_restart(data);
2232 mutex_unlock(&priv->mutex);
2233 }
2234
2235 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2236
2237 static void ipw_scan_check(void *data)
2238 {
2239 struct ipw_priv *priv = data;
2240 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2241 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2242 "adapter after (%dms).\n",
2243 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2244 queue_work(priv->workqueue, &priv->adapter_restart);
2245 }
2246 }
2247
2248 static void ipw_bg_scan_check(void *data)
2249 {
2250 struct ipw_priv *priv = data;
2251 mutex_lock(&priv->mutex);
2252 ipw_scan_check(data);
2253 mutex_unlock(&priv->mutex);
2254 }
2255
2256 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2257 struct ipw_scan_request_ext *request)
2258 {
2259 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2260 sizeof(*request), request);
2261 }
2262
2263 static int ipw_send_scan_abort(struct ipw_priv *priv)
2264 {
2265 if (!priv) {
2266 IPW_ERROR("Invalid args\n");
2267 return -1;
2268 }
2269
2270 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2271 }
2272
2273 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2274 {
2275 struct ipw_sensitivity_calib calib = {
2276 .beacon_rssi_raw = sens,
2277 };
2278
2279 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2280 &calib);
2281 }
2282
2283 static int ipw_send_associate(struct ipw_priv *priv,
2284 struct ipw_associate *associate)
2285 {
2286 struct ipw_associate tmp_associate;
2287
2288 if (!priv || !associate) {
2289 IPW_ERROR("Invalid args\n");
2290 return -1;
2291 }
2292
2293 memcpy(&tmp_associate, associate, sizeof(*associate));
2294 tmp_associate.policy_support =
2295 cpu_to_le16(tmp_associate.policy_support);
2296 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2297 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2298 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2299 tmp_associate.listen_interval =
2300 cpu_to_le16(tmp_associate.listen_interval);
2301 tmp_associate.beacon_interval =
2302 cpu_to_le16(tmp_associate.beacon_interval);
2303 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2304
2305 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2306 &tmp_associate);
2307 }
2308
2309 static int ipw_send_supported_rates(struct ipw_priv *priv,
2310 struct ipw_supported_rates *rates)
2311 {
2312 if (!priv || !rates) {
2313 IPW_ERROR("Invalid args\n");
2314 return -1;
2315 }
2316
2317 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2318 rates);
2319 }
2320
2321 static int ipw_set_random_seed(struct ipw_priv *priv)
2322 {
2323 u32 val;
2324
2325 if (!priv) {
2326 IPW_ERROR("Invalid args\n");
2327 return -1;
2328 }
2329
2330 get_random_bytes(&val, sizeof(val));
2331
2332 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2333 }
2334
2335 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2336 {
2337 if (!priv) {
2338 IPW_ERROR("Invalid args\n");
2339 return -1;
2340 }
2341
2342 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2343 &phy_off);
2344 }
2345
2346 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2347 {
2348 if (!priv || !power) {
2349 IPW_ERROR("Invalid args\n");
2350 return -1;
2351 }
2352
2353 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2354 }
2355
2356 static int ipw_set_tx_power(struct ipw_priv *priv)
2357 {
2358 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2359 struct ipw_tx_power tx_power;
2360 s8 max_power;
2361 int i;
2362
2363 memset(&tx_power, 0, sizeof(tx_power));
2364
2365 /* configure device for 'G' band */
2366 tx_power.ieee_mode = IPW_G_MODE;
2367 tx_power.num_channels = geo->bg_channels;
2368 for (i = 0; i < geo->bg_channels; i++) {
2369 max_power = geo->bg[i].max_power;
2370 tx_power.channels_tx_power[i].channel_number =
2371 geo->bg[i].channel;
2372 tx_power.channels_tx_power[i].tx_power = max_power ?
2373 min(max_power, priv->tx_power) : priv->tx_power;
2374 }
2375 if (ipw_send_tx_power(priv, &tx_power))
2376 return -EIO;
2377
2378 /* configure device to also handle 'B' band */
2379 tx_power.ieee_mode = IPW_B_MODE;
2380 if (ipw_send_tx_power(priv, &tx_power))
2381 return -EIO;
2382
2383 /* configure device to also handle 'A' band */
2384 if (priv->ieee->abg_true) {
2385 tx_power.ieee_mode = IPW_A_MODE;
2386 tx_power.num_channels = geo->a_channels;
2387 for (i = 0; i < tx_power.num_channels; i++) {
2388 max_power = geo->a[i].max_power;
2389 tx_power.channels_tx_power[i].channel_number =
2390 geo->a[i].channel;
2391 tx_power.channels_tx_power[i].tx_power = max_power ?
2392 min(max_power, priv->tx_power) : priv->tx_power;
2393 }
2394 if (ipw_send_tx_power(priv, &tx_power))
2395 return -EIO;
2396 }
2397 return 0;
2398 }
2399
2400 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2401 {
2402 struct ipw_rts_threshold rts_threshold = {
2403 .rts_threshold = rts,
2404 };
2405
2406 if (!priv) {
2407 IPW_ERROR("Invalid args\n");
2408 return -1;
2409 }
2410
2411 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2412 sizeof(rts_threshold), &rts_threshold);
2413 }
2414
2415 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2416 {
2417 struct ipw_frag_threshold frag_threshold = {
2418 .frag_threshold = frag,
2419 };
2420
2421 if (!priv) {
2422 IPW_ERROR("Invalid args\n");
2423 return -1;
2424 }
2425
2426 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2427 sizeof(frag_threshold), &frag_threshold);
2428 }
2429
2430 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2431 {
2432 u32 param;
2433
2434 if (!priv) {
2435 IPW_ERROR("Invalid args\n");
2436 return -1;
2437 }
2438
2439 	/* If on battery, set to index 3; if on AC, set to CAM;
2440 	 * otherwise use the user-supplied level */
2441 switch (mode) {
2442 case IPW_POWER_BATTERY:
2443 param = IPW_POWER_INDEX_3;
2444 break;
2445 case IPW_POWER_AC:
2446 param = IPW_POWER_MODE_CAM;
2447 break;
2448 default:
2449 param = mode;
2450 break;
2451 }
2452
2453 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2454 &param);
2455 }
2456
2457 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2458 {
2459 struct ipw_retry_limit retry_limit = {
2460 .short_retry_limit = slimit,
2461 .long_retry_limit = llimit
2462 };
2463
2464 if (!priv) {
2465 IPW_ERROR("Invalid args\n");
2466 return -1;
2467 }
2468
2469 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2470 &retry_limit);
2471 }
2472
2473 /*
2474 * The IPW device contains a Microwire compatible EEPROM that stores
2475 * various data like the MAC address. Usually the firmware has exclusive
2476 * access to the eeprom, but during device initialization (before the
2477 * device driver has sent the HostComplete command to the firmware) the
2478 * device driver has read access to the EEPROM by way of indirect addressing
2479 * through a couple of memory mapped registers.
2480 *
2481  * The following is a simplified implementation for pulling data out of
2482  * the eeprom, along with some helper functions to find information in
2483 * the per device private data's copy of the eeprom.
2484 *
2485  * NOTE: To better understand how these functions work (i.e. what is a chip
2486  * select and why do we have to keep driving the eeprom clock?), read
2487 * just about any data sheet for a Microwire compatible EEPROM.
2488 */
2489
2490 /* write a 32 bit value into the indirect accessor register */
2491 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2492 {
2493 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2494
2495 /* the eeprom requires some time to complete the operation */
2496 udelay(p->eeprom_delay);
2497
2498 return;
2499 }
2500
2501 /* perform a chip select operation */
2502 static void eeprom_cs(struct ipw_priv *priv)
2503 {
2504 eeprom_write_reg(priv, 0);
2505 eeprom_write_reg(priv, EEPROM_BIT_CS);
2506 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2507 eeprom_write_reg(priv, EEPROM_BIT_CS);
2508 }
2509
2510 /* release the eeprom chip select */
2511 static void eeprom_disable_cs(struct ipw_priv *priv)
2512 {
2513 eeprom_write_reg(priv, EEPROM_BIT_CS);
2514 eeprom_write_reg(priv, 0);
2515 eeprom_write_reg(priv, EEPROM_BIT_SK);
2516 }
2517
2518 /* push a single bit down to the eeprom */
2519 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2520 {
2521 int d = (bit ? EEPROM_BIT_DI : 0);
2522 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2523 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2524 }
2525
2526 /* push an opcode followed by an address down to the eeprom */
2527 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2528 {
2529 int i;
2530
2531 eeprom_cs(priv);
2532 eeprom_write_bit(priv, 1);
2533 eeprom_write_bit(priv, op & 2);
2534 eeprom_write_bit(priv, op & 1);
2535 for (i = 7; i >= 0; i--) {
2536 eeprom_write_bit(priv, addr & (1 << i));
2537 }
2538 }
2539
2540 /* pull 16 bits off the eeprom, one bit at a time */
2541 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2542 {
2543 int i;
2544 u16 r = 0;
2545
2546 /* Send READ Opcode */
2547 eeprom_op(priv, EEPROM_CMD_READ, addr);
2548
2549 /* Send dummy bit */
2550 eeprom_write_reg(priv, EEPROM_BIT_CS);
2551
2552 /* Read the byte off the eeprom one bit at a time */
2553 for (i = 0; i < 16; i++) {
2554 u32 data = 0;
2555 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2556 eeprom_write_reg(priv, EEPROM_BIT_CS);
2557 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2558 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2559 }
2560
2561 /* Send another dummy bit */
2562 eeprom_write_reg(priv, 0);
2563 eeprom_disable_cs(priv);
2564
2565 return r;
2566 }
2567
2568 /* helper function for pulling the mac address out of the private
2569  * data's copy of the eeprom data */
2570 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2571 {
2572 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2573 }
2574
2575 /*
2576 * Either the device driver (i.e. the host) or the firmware can
2577 * load eeprom data into the designated region in SRAM. If neither
2578 * happens then the FW will shutdown with a fatal error.
2579 *
2580 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2581  * word in the shared SRAM region needs to be non-zero.
2582 */
2583 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2584 {
2585 int i;
2586 u16 *eeprom = (u16 *) priv->eeprom;
2587
2588 IPW_DEBUG_TRACE(">>\n");
2589
2590 /* read entire contents of eeprom into private buffer */
2591 for (i = 0; i < 128; i++)
2592 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2593
2594 /*
2595 	   If the data looks correct, then copy it into the device's
2596 	   SRAM.  Otherwise let the firmware know to perform the operation
2597 	   on its own.
2598 */
2599 if (priv->eeprom[EEPROM_VERSION] != 0) {
2600 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2601
2602 /* write the eeprom data to sram */
2603 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2604 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2605
2606 /* Do not load eeprom data on fatal error or suspend */
2607 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2608 } else {
2609 		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2610
2611 /* Load eeprom data on fatal error or suspend */
2612 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2613 }
2614
2615 IPW_DEBUG_TRACE("<<\n");
2616 }
2617
2618 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2619 {
2620 count >>= 2;
2621 if (!count)
2622 return;
2623 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2624 while (count--)
2625 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2626 }
2627
2628 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2629 {
2630 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2631 CB_NUMBER_OF_ELEMENTS_SMALL *
2632 sizeof(struct command_block));
2633 }
2634
2635 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2636 { /* start dma engine but no transfers yet */
2637
2638 IPW_DEBUG_FW(">> : \n");
2639
2640 /* Start the dma */
2641 ipw_fw_dma_reset_command_blocks(priv);
2642
2643 /* Write CB base address */
2644 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2645
2646 IPW_DEBUG_FW("<< : \n");
2647 return 0;
2648 }
2649
2650 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2651 {
2652 u32 control = 0;
2653
2654 IPW_DEBUG_FW(">> :\n");
2655
2656 //set the Stop and Abort bit
2657 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2658 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2659 priv->sram_desc.last_cb_index = 0;
2660
2661 IPW_DEBUG_FW("<< \n");
2662 }
2663
2664 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2665 struct command_block *cb)
2666 {
2667 u32 address =
2668 IPW_SHARED_SRAM_DMA_CONTROL +
2669 (sizeof(struct command_block) * index);
2670 IPW_DEBUG_FW(">> :\n");
2671
2672 ipw_write_indirect(priv, address, (u8 *) cb,
2673 (int)sizeof(struct command_block));
2674
2675 IPW_DEBUG_FW("<< :\n");
2676 return 0;
2677
2678 }
2679
2680 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2681 {
2682 u32 control = 0;
2683 u32 index = 0;
2684
2685 IPW_DEBUG_FW(">> :\n");
2686
2687 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2688 ipw_fw_dma_write_command_block(priv, index,
2689 &priv->sram_desc.cb_list[index]);
2690
2691 /* Enable the DMA in the CSR register */
2692 ipw_clear_bit(priv, IPW_RESET_REG,
2693 IPW_RESET_REG_MASTER_DISABLED |
2694 IPW_RESET_REG_STOP_MASTER);
2695
2696 /* Set the Start bit. */
2697 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2698 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2699
2700 IPW_DEBUG_FW("<< :\n");
2701 return 0;
2702 }
2703
2704 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2705 {
2706 u32 address;
2707 u32 register_value = 0;
2708 u32 cb_fields_address = 0;
2709
2710 IPW_DEBUG_FW(">> :\n");
2711 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2712 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2713
2714 	/* Read the DMA controller register */
2715 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2716 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2717
2718 /* Print the CB values */
2719 cb_fields_address = address;
2720 register_value = ipw_read_reg32(priv, cb_fields_address);
2721 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2722
2723 cb_fields_address += sizeof(u32);
2724 register_value = ipw_read_reg32(priv, cb_fields_address);
2725 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2726
2727 cb_fields_address += sizeof(u32);
2728 register_value = ipw_read_reg32(priv, cb_fields_address);
2729 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2730 register_value);
2731
2732 cb_fields_address += sizeof(u32);
2733 register_value = ipw_read_reg32(priv, cb_fields_address);
2734 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2735
2736 	IPW_DEBUG_FW("<< :\n");
2737 }
2738
2739 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2740 {
2741 u32 current_cb_address = 0;
2742 u32 current_cb_index = 0;
2743
2744 	IPW_DEBUG_FW(">> :\n");
2745 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2746
2747 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2748 sizeof(struct command_block);
2749
2750 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2751 current_cb_index, current_cb_address);
2752
2753 	IPW_DEBUG_FW("<< :\n");
2754 return current_cb_index;
2755
2756 }
2757
2758 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2759 u32 src_address,
2760 u32 dest_address,
2761 u32 length,
2762 int interrupt_enabled, int is_last)
2763 {
2764
2765 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2766 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2767 CB_DEST_SIZE_LONG;
2768 struct command_block *cb;
2769 u32 last_cb_element = 0;
2770
2771 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2772 src_address, dest_address, length);
2773
2774 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2775 return -1;
2776
2777 last_cb_element = priv->sram_desc.last_cb_index;
2778 cb = &priv->sram_desc.cb_list[last_cb_element];
2779 priv->sram_desc.last_cb_index++;
2780
2781 /* Calculate the new CB control word */
2782 if (interrupt_enabled)
2783 control |= CB_INT_ENABLED;
2784
2785 if (is_last)
2786 control |= CB_LAST_VALID;
2787
2788 control |= length;
2789
2790 /* Calculate the CB Element's checksum value */
2791 cb->status = control ^ src_address ^ dest_address;
2792
2793 /* Copy the Source and Destination addresses */
2794 cb->dest_addr = dest_address;
2795 cb->source_addr = src_address;
2796
2797 /* Copy the Control Word last */
2798 cb->control = control;
2799
2800 return 0;
2801 }
2802
2803 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2804 u32 src_phys, u32 dest_address, u32 length)
2805 {
2806 u32 bytes_left = length;
2807 u32 src_offset = 0;
2808 u32 dest_offset = 0;
2809 int status = 0;
2810 IPW_DEBUG_FW(">> \n");
2811 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2812 src_phys, dest_address, length);
2813 while (bytes_left > CB_MAX_LENGTH) {
2814 status = ipw_fw_dma_add_command_block(priv,
2815 src_phys + src_offset,
2816 dest_address +
2817 dest_offset,
2818 CB_MAX_LENGTH, 0, 0);
2819 if (status) {
2820 IPW_DEBUG_FW_INFO(": Failed\n");
2821 return -1;
2822 } else
2823 IPW_DEBUG_FW_INFO(": Added new cb\n");
2824
2825 src_offset += CB_MAX_LENGTH;
2826 dest_offset += CB_MAX_LENGTH;
2827 bytes_left -= CB_MAX_LENGTH;
2828 }
2829
2830 /* add the buffer tail */
2831 if (bytes_left > 0) {
2832 status =
2833 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2834 dest_address + dest_offset,
2835 bytes_left, 0, 0);
2836 if (status) {
2837 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2838 return -1;
2839 } else
2840 IPW_DEBUG_FW_INFO
2841 (": Adding new cb - the buffer tail\n");
2842 }
2843
2844 IPW_DEBUG_FW("<< \n");
2845 return 0;
2846 }
2847
2848 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2849 {
2850 u32 current_index = 0, previous_index;
2851 u32 watchdog = 0;
2852
2853 IPW_DEBUG_FW(">> : \n");
2854
2855 current_index = ipw_fw_dma_command_block_index(priv);
2856 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2857 (int)priv->sram_desc.last_cb_index);
2858
2859 while (current_index < priv->sram_desc.last_cb_index) {
2860 udelay(50);
2861 previous_index = current_index;
2862 current_index = ipw_fw_dma_command_block_index(priv);
2863
2864 if (previous_index < current_index) {
2865 watchdog = 0;
2866 continue;
2867 }
2868 if (++watchdog > 400) {
2869 IPW_DEBUG_FW_INFO("Timeout\n");
2870 ipw_fw_dma_dump_command_block(priv);
2871 ipw_fw_dma_abort(priv);
2872 return -1;
2873 }
2874 }
2875
2876 ipw_fw_dma_abort(priv);
2877
2878 	/* Disable the DMA in the CSR register */
2879 ipw_set_bit(priv, IPW_RESET_REG,
2880 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2881
2882 IPW_DEBUG_FW("<< dmaWaitSync \n");
2883 return 0;
2884 }
2885
2886 static void ipw_remove_current_network(struct ipw_priv *priv)
2887 {
2888 struct list_head *element, *safe;
2889 struct ieee80211_network *network = NULL;
2890 unsigned long flags;
2891
2892 spin_lock_irqsave(&priv->ieee->lock, flags);
2893 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2894 network = list_entry(element, struct ieee80211_network, list);
2895 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2896 list_del(element);
2897 list_add_tail(&network->list,
2898 &priv->ieee->network_free_list);
2899 }
2900 }
2901 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2902 }
2903
2904 /**
2905 * Check that card is still alive.
2906  * Reads the debug register from domain0. If the
2907  * card is present, a pre-defined value should be
2908  * found there.
2909 *
2910 * @param priv
2911 * @return 1 if card is present, 0 otherwise
2912 */
2913 static inline int ipw_alive(struct ipw_priv *priv)
2914 {
2915 return ipw_read32(priv, 0x90) == 0xd55555d5;
2916 }
2917
2918 /* timeout in msec, attempted in 10-msec quanta */
2919 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2920 int timeout)
2921 {
2922 int i = 0;
2923
2924 do {
2925 if ((ipw_read32(priv, addr) & mask) == mask)
2926 return i;
2927 mdelay(10);
2928 i += 10;
2929 } while (i < timeout);
2930
2931 return -ETIME;
2932 }
2933
2934 /* These functions load the firmware and microcode for the operation of
2935  * the ipw hardware.  They assume the buffer has all the bits for the
2936  * image and that the caller is handling the memory allocation and clean up.
2937 */
2938
2939 static int ipw_stop_master(struct ipw_priv *priv)
2940 {
2941 int rc;
2942
2943 IPW_DEBUG_TRACE(">> \n");
2944 /* stop master. typical delay - 0 */
2945 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2946
2947 /* timeout is in msec, polled in 10-msec quanta */
2948 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2949 IPW_RESET_REG_MASTER_DISABLED, 100);
2950 if (rc < 0) {
2951 IPW_ERROR("wait for stop master failed after 100ms\n");
2952 return -1;
2953 }
2954
2955 IPW_DEBUG_INFO("stop master %dms\n", rc);
2956
2957 return rc;
2958 }
2959
2960 static void ipw_arc_release(struct ipw_priv *priv)
2961 {
2962 IPW_DEBUG_TRACE(">> \n");
2963 mdelay(5);
2964
2965 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2966
2967 	/* no one knows the timing; for safety add some delay */
2968 mdelay(5);
2969 }
2970
2971 struct fw_chunk {
2972 u32 address;
2973 u32 length;
2974 };
2975
2976 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2977 {
2978 int rc = 0, i, addr;
2979 u8 cr = 0;
2980 u16 *image;
2981
2982 image = (u16 *) data;
2983
2984 IPW_DEBUG_TRACE(">> \n");
2985
2986 rc = ipw_stop_master(priv);
2987
2988 if (rc < 0)
2989 return rc;
2990
2991 // spin_lock_irqsave(&priv->lock, flags);
2992
2993 for (addr = IPW_SHARED_LOWER_BOUND;
2994 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2995 ipw_write32(priv, addr, 0);
2996 }
2997
2998 /* no ucode (yet) */
2999 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3000 /* destroy DMA queues */
3001 /* reset sequence */
3002
3003 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3004 ipw_arc_release(priv);
3005 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3006 mdelay(1);
3007
3008 /* reset PHY */
3009 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3010 mdelay(1);
3011
3012 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3013 mdelay(1);
3014
3015 /* enable ucode store */
3016 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3017 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3018 mdelay(1);
3019
3020 /* write ucode */
3021 /**
3022 * @bug
3023 	 * Do NOT set the indirect address register once and then
3024 	 * store data to the indirect data register in a loop.
3025 	 * It seems very reasonable, but in that case DINO does not
3026 	 * accept the ucode. It is essential to set the address each time.
3027 */
3028 /* load new ipw uCode */
3029 for (i = 0; i < len / 2; i++)
3030 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3031 cpu_to_le16(image[i]));
3032
3033 /* enable DINO */
3034 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3035 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3036
3037 	/* this is where the igx / win driver deviates from the VAP driver. */
3038
3039 /* wait for alive response */
3040 for (i = 0; i < 100; i++) {
3041 /* poll for incoming data */
3042 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3043 if (cr & DINO_RXFIFO_DATA)
3044 break;
3045 mdelay(1);
3046 }
3047
3048 if (cr & DINO_RXFIFO_DATA) {
3049 		/* alive_command_responce size is NOT a multiple of 4 */
3050 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3051
3052 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3053 response_buffer[i] =
3054 le32_to_cpu(ipw_read_reg32(priv,
3055 IPW_BASEBAND_RX_FIFO_READ));
3056 memcpy(&priv->dino_alive, response_buffer,
3057 sizeof(priv->dino_alive));
3058 if (priv->dino_alive.alive_command == 1
3059 && priv->dino_alive.ucode_valid == 1) {
3060 rc = 0;
3061 IPW_DEBUG_INFO
3062 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3063 "of %02d/%02d/%02d %02d:%02d\n",
3064 priv->dino_alive.software_revision,
3065 priv->dino_alive.software_revision,
3066 priv->dino_alive.device_identifier,
3067 priv->dino_alive.device_identifier,
3068 priv->dino_alive.time_stamp[0],
3069 priv->dino_alive.time_stamp[1],
3070 priv->dino_alive.time_stamp[2],
3071 priv->dino_alive.time_stamp[3],
3072 priv->dino_alive.time_stamp[4]);
3073 } else {
3074 IPW_DEBUG_INFO("Microcode is not alive\n");
3075 rc = -EINVAL;
3076 }
3077 } else {
3078 IPW_DEBUG_INFO("No alive response from DINO\n");
3079 rc = -ETIME;
3080 }
3081
3082 	/* disable DINO, otherwise for some reason the
3083 	   firmware has problems getting the alive response. */
3084 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3085
3086 // spin_unlock_irqrestore(&priv->lock, flags);
3087
3088 return rc;
3089 }
3090
3091 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3092 {
3093 int rc = -1;
3094 int offset = 0;
3095 struct fw_chunk *chunk;
3096 dma_addr_t shared_phys;
3097 u8 *shared_virt;
3098
3099 	IPW_DEBUG_TRACE(">> : \n");
3100 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3101
3102 if (!shared_virt)
3103 return -ENOMEM;
3104
3105 memmove(shared_virt, data, len);
3106
3107 /* Start the Dma */
3108 rc = ipw_fw_dma_enable(priv);
3109
3110 if (priv->sram_desc.last_cb_index > 0) {
3111 		/* the DMA is already ready; this would be a bug. */
3112 BUG();
3113 goto out;
3114 }
3115
3116 do {
3117 chunk = (struct fw_chunk *)(data + offset);
3118 offset += sizeof(struct fw_chunk);
3119 /* build DMA packet and queue up for sending */
3120 		/* dma to chunk->address, the chunk->length bytes from data +
3121 		 * offset */
3122 /* Dma loading */
3123 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3124 le32_to_cpu(chunk->address),
3125 le32_to_cpu(chunk->length));
3126 if (rc) {
3127 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3128 goto out;
3129 }
3130
3131 offset += le32_to_cpu(chunk->length);
3132 } while (offset < len);
3133
3134 /* Run the DMA and wait for the answer */
3135 rc = ipw_fw_dma_kick(priv);
3136 if (rc) {
3137 IPW_ERROR("dmaKick Failed\n");
3138 goto out;
3139 }
3140
3141 rc = ipw_fw_dma_wait(priv);
3142 if (rc) {
3143 IPW_ERROR("dmaWaitSync Failed\n");
3144 goto out;
3145 }
3146 out:
3147 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3148 return rc;
3149 }
3150
3151 /* stop nic */
3152 static int ipw_stop_nic(struct ipw_priv *priv)
3153 {
3154 int rc = 0;
3155
3156 /* stop */
3157 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3158
3159 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3160 IPW_RESET_REG_MASTER_DISABLED, 500);
3161 if (rc < 0) {
3162 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3163 return rc;
3164 }
3165
3166 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3167
3168 return rc;
3169 }
3170
3171 static void ipw_start_nic(struct ipw_priv *priv)
3172 {
3173 IPW_DEBUG_TRACE(">>\n");
3174
3175 /* prvHwStartNic release ARC */
3176 ipw_clear_bit(priv, IPW_RESET_REG,
3177 IPW_RESET_REG_MASTER_DISABLED |
3178 IPW_RESET_REG_STOP_MASTER |
3179 CBD_RESET_REG_PRINCETON_RESET);
3180
3181 /* enable power management */
3182 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3183 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3184
3185 IPW_DEBUG_TRACE("<<\n");
3186 }
3187
3188 static int ipw_init_nic(struct ipw_priv *priv)
3189 {
3190 int rc;
3191
3192 IPW_DEBUG_TRACE(">>\n");
3193 /* reset */
3194 /*prvHwInitNic */
3195 /* set "initialization complete" bit to move adapter to D0 state */
3196 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3197
3198 /* low-level PLL activation */
3199 ipw_write32(priv, IPW_READ_INT_REGISTER,
3200 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3201
3202 /* wait for clock stabilization */
3203 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3204 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3205 if (rc < 0)
3206 		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3207
3208 /* assert SW reset */
3209 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3210
3211 udelay(10);
3212
3213 /* set "initialization complete" bit to move adapter to D0 state */
3214 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3215
3216 	IPW_DEBUG_TRACE("<<\n");
3217 return 0;
3218 }
3219
3220 /* Call this function from process context, it will sleep in request_firmware.
3221 * Probe is an ok place to call this from.
3222 */
3223 static int ipw_reset_nic(struct ipw_priv *priv)
3224 {
3225 int rc = 0;
3226 unsigned long flags;
3227
3228 IPW_DEBUG_TRACE(">>\n");
3229
3230 rc = ipw_init_nic(priv);
3231
3232 spin_lock_irqsave(&priv->lock, flags);
3233 /* Clear the 'host command active' bit... */
3234 priv->status &= ~STATUS_HCMD_ACTIVE;
3235 wake_up_interruptible(&priv->wait_command_queue);
3236 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3237 wake_up_interruptible(&priv->wait_state);
3238 spin_unlock_irqrestore(&priv->lock, flags);
3239
3240 IPW_DEBUG_TRACE("<<\n");
3241 return rc;
3242 }
3243
3244
3245 struct ipw_fw {
3246 __le32 ver;
3247 __le32 boot_size;
3248 __le32 ucode_size;
3249 __le32 fw_size;
3250 u8 data[0];
3251 };
3252
3253 static int ipw_get_fw(struct ipw_priv *priv,
3254 const struct firmware **raw, const char *name)
3255 {
3256 struct ipw_fw *fw;
3257 int rc;
3258
3259 /* ask firmware_class module to get the boot firmware off disk */
3260 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3261 if (rc < 0) {
3262 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3263 return rc;
3264 }
3265
3266 if ((*raw)->size < sizeof(*fw)) {
3267 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3268 return -EINVAL;
3269 }
3270
3271 fw = (void *)(*raw)->data;
3272
3273 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3274 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3275 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3276 name, (*raw)->size);
3277 return -EINVAL;
3278 }
3279
3280 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3281 name,
3282 le32_to_cpu(fw->ver) >> 16,
3283 le32_to_cpu(fw->ver) & 0xff,
3284 (*raw)->size - sizeof(*fw));
3285 return 0;
3286 }
3287
3288 #define IPW_RX_BUF_SIZE (3000)
3289
3290 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3291 struct ipw_rx_queue *rxq)
3292 {
3293 unsigned long flags;
3294 int i;
3295
3296 spin_lock_irqsave(&rxq->lock, flags);
3297
3298 INIT_LIST_HEAD(&rxq->rx_free);
3299 INIT_LIST_HEAD(&rxq->rx_used);
3300
3301 /* Fill the rx_used queue with _all_ of the Rx buffers */
3302 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3303 /* In the reset function, these buffers may have been allocated
3304 * to an SKB, so we need to unmap and free potential storage */
3305 if (rxq->pool[i].skb != NULL) {
3306 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3307 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3308 dev_kfree_skb(rxq->pool[i].skb);
3309 rxq->pool[i].skb = NULL;
3310 }
3311 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3312 }
3313
3314 	/* Set things up so that we have processed and used all buffers, but
3315 	 * have not restocked the Rx queue with fresh buffers */
3316 rxq->read = rxq->write = 0;
3317 rxq->processed = RX_QUEUE_SIZE - 1;
3318 rxq->free_count = 0;
3319 spin_unlock_irqrestore(&rxq->lock, flags);
3320 }
3321
3322 #ifdef CONFIG_PM
3323 static int fw_loaded = 0;
3324 static const struct firmware *raw = NULL;
3325
3326 static void free_firmware(void)
3327 {
3328 if (fw_loaded) {
3329 release_firmware(raw);
3330 raw = NULL;
3331 fw_loaded = 0;
3332 }
3333 }
3334 #else
3335 #define free_firmware() do {} while (0)
3336 #endif
3337
3338 static int ipw_load(struct ipw_priv *priv)
3339 {
3340 #ifndef CONFIG_PM
3341 const struct firmware *raw = NULL;
3342 #endif
3343 struct ipw_fw *fw;
3344 u8 *boot_img, *ucode_img, *fw_img;
3345 u8 *name = NULL;
3346 int rc = 0, retries = 3;
3347
3348 switch (priv->ieee->iw_mode) {
3349 case IW_MODE_ADHOC:
3350 name = "ipw2200-ibss.fw";
3351 break;
3352 #ifdef CONFIG_IPW2200_MONITOR
3353 case IW_MODE_MONITOR:
3354 name = "ipw2200-sniffer.fw";
3355 break;
3356 #endif
3357 case IW_MODE_INFRA:
3358 name = "ipw2200-bss.fw";
3359 break;
3360 }
3361
3362 if (!name) {
3363 rc = -EINVAL;
3364 goto error;
3365 }
3366
3367 #ifdef CONFIG_PM
3368 if (!fw_loaded) {
3369 #endif
3370 rc = ipw_get_fw(priv, &raw, name);
3371 if (rc < 0)
3372 goto error;
3373 #ifdef CONFIG_PM
3374 }
3375 #endif
3376
3377 fw = (void *)raw->data;
3378 boot_img = &fw->data[0];
3379 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3380 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3381 le32_to_cpu(fw->ucode_size)];
3382
3383 if (rc < 0)
3384 goto error;
3385
3386 if (!priv->rxq)
3387 priv->rxq = ipw_rx_queue_alloc(priv);
3388 else
3389 ipw_rx_queue_reset(priv, priv->rxq);
3390 if (!priv->rxq) {
3391 IPW_ERROR("Unable to initialize Rx queue\n");
3392 goto error;
3393 }
3394
3395 retry:
3396 /* Ensure interrupts are disabled */
3397 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3398 priv->status &= ~STATUS_INT_ENABLED;
3399
3400 /* ack pending interrupts */
3401 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3402
3403 ipw_stop_nic(priv);
3404
3405 rc = ipw_reset_nic(priv);
3406 if (rc < 0) {
3407 IPW_ERROR("Unable to reset NIC\n");
3408 goto error;
3409 }
3410
3411 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3412 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3413
3414 /* DMA the initial boot firmware into the device */
3415 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3416 if (rc < 0) {
3417 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3418 goto error;
3419 }
3420
3421 /* kick start the device */
3422 ipw_start_nic(priv);
3423
3424 /* wait for the device to finish its initial startup sequence */
3425 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3426 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3427 if (rc < 0) {
3428 IPW_ERROR("device failed to boot initial fw image\n");
3429 goto error;
3430 }
3431 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3432
3433 /* ack fw init done interrupt */
3434 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3435
3436 /* DMA the ucode into the device */
3437 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3438 if (rc < 0) {
3439 IPW_ERROR("Unable to load ucode: %d\n", rc);
3440 goto error;
3441 }
3442
3443 /* stop nic */
3444 ipw_stop_nic(priv);
3445
3446 /* DMA bss firmware into the device */
3447 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3448 if (rc < 0) {
3449 IPW_ERROR("Unable to load firmware: %d\n", rc);
3450 goto error;
3451 }
3452 #ifdef CONFIG_PM
3453 fw_loaded = 1;
3454 #endif
3455
3456 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3457
3458 rc = ipw_queue_reset(priv);
3459 if (rc < 0) {
3460 IPW_ERROR("Unable to initialize queues\n");
3461 goto error;
3462 }
3463
3464 /* Ensure interrupts are disabled */
3465 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3466 /* ack pending interrupts */
3467 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3468
3469 /* kick start the device */
3470 ipw_start_nic(priv);
3471
3472 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3473 if (retries > 0) {
3474 IPW_WARNING("Parity error. Retrying init.\n");
3475 retries--;
3476 goto retry;
3477 }
3478
3479 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3480 rc = -EIO;
3481 goto error;
3482 }
3483
3484 /* wait for the device */
3485 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3486 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3487 if (rc < 0) {
3488 IPW_ERROR("device failed to start within 500ms\n");
3489 goto error;
3490 }
3491 IPW_DEBUG_INFO("device response after %dms\n", rc);
3492
3493 /* ack fw init done interrupt */
3494 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3495
3496 /* read eeprom data and initialize the eeprom region of sram */
3497 priv->eeprom_delay = 1;
3498 ipw_eeprom_init_sram(priv);
3499
3500 /* enable interrupts */
3501 ipw_enable_interrupts(priv);
3502
3503 /* Ensure our queue has valid packets */
3504 ipw_rx_queue_replenish(priv);
3505
3506 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3507
3508 /* ack pending interrupts */
3509 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3510
3511 #ifndef CONFIG_PM
3512 release_firmware(raw);
3513 #endif
3514 return 0;
3515
3516 error:
3517 if (priv->rxq) {
3518 ipw_rx_queue_free(priv, priv->rxq);
3519 priv->rxq = NULL;
3520 }
3521 ipw_tx_queue_free(priv);
3522 if (raw)
3523 release_firmware(raw);
3524 #ifdef CONFIG_PM
3525 fw_loaded = 0;
3526 raw = NULL;
3527 #endif
3528
3529 return rc;
3530 }
3531
3532 /**
3533 * DMA services
3534 *
3535 * Theory of operation
3536 *
3537  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3538  * 2 empty entries are always kept in the buffer to protect from overflow.
3539  *
3540  * For the Tx queue, there are low mark and high mark limits.  If, after
3541  * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3542  * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
3543  * space becomes > high mark, the Tx queue is resumed.
3544 *
3545 * The IPW operates with six queues, one receive queue in the device's
3546 * sram, one transmit queue for sending commands to the device firmware,
3547 * and four transmit queues for data.
3548 *
3549 * The four transmit queues allow for performing quality of service (qos)
3550 * transmissions as per the 802.11 protocol. Currently Linux does not
3551 * provide a mechanism to the user for utilizing prioritized queues, so
3552 * we only utilize the first data transmit queue (queue1).
3553 */
3554
3555 /**
3556 * Driver allocates buffers of this size for Rx
3557 */
3558
3559 static inline int ipw_queue_space(const struct clx2_queue *q)
3560 {
3561 int s = q->last_used - q->first_empty;
3562 if (s <= 0)
3563 s += q->n_bd;
3564 s -= 2; /* keep some reserve to not confuse empty and full situations */
3565 if (s < 0)
3566 s = 0;
3567 return s;
3568 }
3569
3570 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3571 {
3572 return (++index == n_bd) ? 0 : index;
3573 }
3574
3575 /**
3576 * Initialize common DMA queue structure
3577 *
3578 * @param q queue to init
3579 * @param count Number of BD's to allocate. Should be power of 2
3580 * @param read_register Address for 'read' register
3581 * (not offset within BAR, full address)
3582 * @param write_register Address for 'write' register
3583 * (not offset within BAR, full address)
3584 * @param base_register Address for 'base' register
3585 * (not offset within BAR, full address)
3586 * @param size Address for 'size' register
3587 * (not offset within BAR, full address)
3588 */
3589 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3590 int count, u32 read, u32 write, u32 base, u32 size)
3591 {
3592 q->n_bd = count;
3593
3594 q->low_mark = q->n_bd / 4;
3595 if (q->low_mark < 4)
3596 q->low_mark = 4;
3597
3598 q->high_mark = q->n_bd / 8;
3599 if (q->high_mark < 2)
3600 q->high_mark = 2;
3601
3602 q->first_empty = q->last_used = 0;
3603 q->reg_r = read;
3604 q->reg_w = write;
3605
3606 ipw_write32(priv, base, q->dma_addr);
3607 ipw_write32(priv, size, count);
3608 ipw_write32(priv, read, 0);
3609 ipw_write32(priv, write, 0);
3610
3611 _ipw_read32(priv, 0x90);
3612 }
3613
3614 static int ipw_queue_tx_init(struct ipw_priv *priv,
3615 struct clx2_tx_queue *q,
3616 int count, u32 read, u32 write, u32 base, u32 size)
3617 {
3618 struct pci_dev *dev = priv->pci_dev;
3619
3620 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3621 if (!q->txb) {
3622 		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3623 return -ENOMEM;
3624 }
3625
3626 q->bd =
3627 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3628 if (!q->bd) {
3629 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3630 sizeof(q->bd[0]) * count);
3631 kfree(q->txb);
3632 q->txb = NULL;
3633 return -ENOMEM;
3634 }
3635
3636 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3637 return 0;
3638 }
3639
3640 /**
3641  * Free one TFD, the one at index [txq->q.last_used].
3642 * Do NOT advance any indexes
3643 *
3644 * @param dev
3645 * @param txq
3646 */
3647 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3648 struct clx2_tx_queue *txq)
3649 {
3650 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3651 struct pci_dev *dev = priv->pci_dev;
3652 int i;
3653
3654 /* classify bd */
3655 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3656 /* nothing to cleanup after for host commands */
3657 return;
3658
3659 /* sanity check */
3660 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3661 IPW_ERROR("Too many chunks: %i\n",
3662 le32_to_cpu(bd->u.data.num_chunks));
3663 		/** @todo issue a fatal error, it is quite a serious situation */
3664 return;
3665 }
3666
3667 /* unmap chunks if any */
3668 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3669 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3670 le16_to_cpu(bd->u.data.chunk_len[i]),
3671 PCI_DMA_TODEVICE);
3672 if (txq->txb[txq->q.last_used]) {
3673 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3674 txq->txb[txq->q.last_used] = NULL;
3675 }
3676 }
3677 }
3678
3679 /**
3680 * Deallocate DMA queue.
3681 *
3682 * Empty queue by removing and destroying all BD's.
3683 * Free all buffers.
3684 *
3685 * @param dev
3686 * @param q
3687 */
3688 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3689 {
3690 struct clx2_queue *q = &txq->q;
3691 struct pci_dev *dev = priv->pci_dev;
3692
3693 if (q->n_bd == 0)
3694 return;
3695
3696 /* first, empty all BD's */
3697 for (; q->first_empty != q->last_used;
3698 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3699 ipw_queue_tx_free_tfd(priv, txq);
3700 }
3701
3702 /* free buffers belonging to queue itself */
3703 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3704 q->dma_addr);
3705 kfree(txq->txb);
3706
3707 /* 0 fill whole structure */
3708 memset(txq, 0, sizeof(*txq));
3709 }
3710
3711 /**
3712 * Destroy all DMA queues and structures
3713 *
3714 * @param priv
3715 */
3716 static void ipw_tx_queue_free(struct ipw_priv *priv)
3717 {
3718 /* Tx CMD queue */
3719 ipw_queue_tx_free(priv, &priv->txq_cmd);
3720
3721 /* Tx queues */
3722 ipw_queue_tx_free(priv, &priv->txq[0]);
3723 ipw_queue_tx_free(priv, &priv->txq[1]);
3724 ipw_queue_tx_free(priv, &priv->txq[2]);
3725 ipw_queue_tx_free(priv, &priv->txq[3]);
3726 }
3727
3728 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3729 {
3730 /* First 3 bytes are manufacturer */
3731 bssid[0] = priv->mac_addr[0];
3732 bssid[1] = priv->mac_addr[1];
3733 bssid[2] = priv->mac_addr[2];
3734
3735 /* Last bytes are random */
3736 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3737
3738 bssid[0] &= 0xfe; /* clear multicast bit */
3739 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3740 }
3741
3742 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3743 {
3744 struct ipw_station_entry entry;
3745 int i;
3746
3747 for (i = 0; i < priv->num_stations; i++) {
3748 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3749 /* Another node is active in network */
3750 priv->missed_adhoc_beacons = 0;
3751 if (!(priv->config & CFG_STATIC_CHANNEL))
3752 /* when other nodes drop out, we drop out */
3753 priv->config &= ~CFG_ADHOC_PERSIST;
3754
3755 return i;
3756 }
3757 }
3758
3759 if (i == MAX_STATIONS)
3760 return IPW_INVALID_STATION;
3761
3762 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3763
3764 entry.reserved = 0;
3765 entry.support_mode = 0;
3766 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3767 memcpy(priv->stations[i], bssid, ETH_ALEN);
3768 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3769 &entry, sizeof(entry));
3770 priv->num_stations++;
3771
3772 return i;
3773 }
3774
3775 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3776 {
3777 int i;
3778
3779 for (i = 0; i < priv->num_stations; i++)
3780 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3781 return i;
3782
3783 return IPW_INVALID_STATION;
3784 }
3785
3786 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3787 {
3788 int err;
3789
3790 if (priv->status & STATUS_ASSOCIATING) {
3791 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3792 queue_work(priv->workqueue, &priv->disassociate);
3793 return;
3794 }
3795
3796 if (!(priv->status & STATUS_ASSOCIATED)) {
3797 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3798 return;
3799 }
3800
3801 	IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3802 "on channel %d.\n",
3803 MAC_ARG(priv->assoc_request.bssid),
3804 priv->assoc_request.channel);
3805
3806 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3807 priv->status |= STATUS_DISASSOCIATING;
3808
3809 if (quiet)
3810 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3811 else
3812 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3813
3814 err = ipw_send_associate(priv, &priv->assoc_request);
3815 if (err) {
3816 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3817 "failed.\n");
3818 return;
3819 }
3820
3821 }
3822
3823 static int ipw_disassociate(void *data)
3824 {
3825 struct ipw_priv *priv = data;
3826 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3827 return 0;
3828 ipw_send_disassociate(data, 0);
3829 return 1;
3830 }
3831
3832 static void ipw_bg_disassociate(void *data)
3833 {
3834 struct ipw_priv *priv = data;
3835 mutex_lock(&priv->mutex);
3836 ipw_disassociate(data);
3837 mutex_unlock(&priv->mutex);
3838 }
3839
3840 static void ipw_system_config(void *data)
3841 {
3842 struct ipw_priv *priv = data;
3843
3844 #ifdef CONFIG_IPW2200_PROMISCUOUS
3845 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3846 priv->sys_config.accept_all_data_frames = 1;
3847 priv->sys_config.accept_non_directed_frames = 1;
3848 priv->sys_config.accept_all_mgmt_bcpr = 1;
3849 priv->sys_config.accept_all_mgmt_frames = 1;
3850 }
3851 #endif
3852
3853 ipw_send_system_config(priv);
3854 }
3855
3856 struct ipw_status_code {
3857 u16 status;
3858 const char *reason;
3859 };
3860
3861 static const struct ipw_status_code ipw_status_codes[] = {
3862 {0x00, "Successful"},
3863 {0x01, "Unspecified failure"},
3864 {0x0A, "Cannot support all requested capabilities in the "
3865 "Capability information field"},
3866 {0x0B, "Reassociation denied due to inability to confirm that "
3867 "association exists"},
3868 {0x0C, "Association denied due to reason outside the scope of this "
3869 "standard"},
3870 {0x0D,
3871 "Responding station does not support the specified authentication "
3872 "algorithm"},
3873 {0x0E,
3874 "Received an Authentication frame with authentication sequence "
3875 "transaction sequence number out of expected sequence"},
3876 {0x0F, "Authentication rejected because of challenge failure"},
3877 {0x10, "Authentication rejected due to timeout waiting for next "
3878 "frame in sequence"},
3879 {0x11, "Association denied because AP is unable to handle additional "
3880 "associated stations"},
3881 {0x12,
3882 "Association denied due to requesting station not supporting all "
3883 "of the datarates in the BSSBasicServiceSet Parameter"},
3884 {0x13,
3885 "Association denied due to requesting station not supporting "
3886 "short preamble operation"},
3887 {0x14,
3888 "Association denied due to requesting station not supporting "
3889 "PBCC encoding"},
3890 {0x15,
3891 "Association denied due to requesting station not supporting "
3892 "channel agility"},
3893 {0x19,
3894 "Association denied due to requesting station not supporting "
3895 "short slot operation"},
3896 {0x1A,
3897 "Association denied due to requesting station not supporting "
3898 "DSSS-OFDM operation"},
3899 {0x28, "Invalid Information Element"},
3900 {0x29, "Group Cipher is not valid"},
3901 {0x2A, "Pairwise Cipher is not valid"},
3902 {0x2B, "AKMP is not valid"},
3903 {0x2C, "Unsupported RSN IE version"},
3904 {0x2D, "Invalid RSN IE Capabilities"},
3905 {0x2E, "Cipher suite is rejected per security policy"},
3906 };
3907
3908 #ifdef CONFIG_IPW2200_DEBUG
3909 static const char *ipw_get_status_code(u16 status)
3910 {
3911 int i;
3912 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3913 if (ipw_status_codes[i].status == (status & 0xff))
3914 return ipw_status_codes[i].reason;
3915 return "Unknown status value.";
3916 }
3917 #endif
3918
3919 static inline void average_init(struct average *avg)
3920 {
3921 memset(avg, 0, sizeof(*avg));
3922 }
3923
3924 #define DEPTH_RSSI 8
3925 #define DEPTH_NOISE 16
3926 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3927 {
3928 return ((depth-1)*prev_avg + val)/depth;
3929 }
3930
3931 static void average_add(struct average *avg, s16 val)
3932 {
3933 avg->sum -= avg->entries[avg->pos];
3934 avg->sum += val;
3935 avg->entries[avg->pos++] = val;
3936 if (unlikely(avg->pos == AVG_ENTRIES)) {
3937 avg->init = 1;
3938 avg->pos = 0;
3939 }
3940 }
3941
3942 static s16 average_value(struct average *avg)
3943 {
3944 if (unlikely(!avg->init)) {
3945 if (avg->pos)
3946 return avg->sum / avg->pos;
3947 return 0;
3948 }
3949
3950 return avg->sum / AVG_ENTRIES;
3951 }
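
/*
 * Worked example (not from the original driver): exponential_average()
 * above is a simple IIR filter, avg' = ((depth - 1) * avg + sample) / depth,
 * so with DEPTH_RSSI = 8 each sample moves the average by roughly 1/8 of
 * the error.  For a previous average of -60 dBm and a new sample of -52 dBm:
 *
 *	((8 - 1) * -60 + -52) / 8 = -472 / 8 = -59
 *
 * The struct average helpers, in contrast, keep a true sliding window of
 * AVG_ENTRIES samples and return the plain arithmetic mean once the window
 * has filled (avg->init is set on the first wrap of avg->pos).
 */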
3952
3953 static void ipw_reset_stats(struct ipw_priv *priv)
3954 {
3955 u32 len = sizeof(u32);
3956
3957 priv->quality = 0;
3958
3959 average_init(&priv->average_missed_beacons);
3960 priv->exp_avg_rssi = -60;
3961 priv->exp_avg_noise = -85 + 0x100;
3962
3963 priv->last_rate = 0;
3964 priv->last_missed_beacons = 0;
3965 priv->last_rx_packets = 0;
3966 priv->last_tx_packets = 0;
3967 priv->last_tx_failures = 0;
3968
3969 /* Firmware managed, reset only when NIC is restarted, so we have to
3970 * normalize on the current value */
3971 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3972 &priv->last_rx_err, &len);
3973 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3974 &priv->last_tx_failures, &len);
3975
3976 /* Driver managed, reset with each association */
3977 priv->missed_adhoc_beacons = 0;
3978 priv->missed_beacons = 0;
3979 priv->tx_packets = 0;
3980 priv->rx_packets = 0;
3981
3982 }
3983
3984 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3985 {
3986 u32 i = 0x80000000;
3987 u32 mask = priv->rates_mask;
3988 /* If currently associated in B mode, restrict the maximum
3989 * rate match to B rates */
3990 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3991 mask &= IEEE80211_CCK_RATES_MASK;
3992
3993 /* TODO: Verify that the rate is supported by the current rates
3994 * list. */
3995
3996 while (i && !(mask & i))
3997 i >>= 1;
3998 switch (i) {
3999 case IEEE80211_CCK_RATE_1MB_MASK:
4000 return 1000000;
4001 case IEEE80211_CCK_RATE_2MB_MASK:
4002 return 2000000;
4003 case IEEE80211_CCK_RATE_5MB_MASK:
4004 return 5500000;
4005 case IEEE80211_OFDM_RATE_6MB_MASK:
4006 return 6000000;
4007 case IEEE80211_OFDM_RATE_9MB_MASK:
4008 return 9000000;
4009 case IEEE80211_CCK_RATE_11MB_MASK:
4010 return 11000000;
4011 case IEEE80211_OFDM_RATE_12MB_MASK:
4012 return 12000000;
4013 case IEEE80211_OFDM_RATE_18MB_MASK:
4014 return 18000000;
4015 case IEEE80211_OFDM_RATE_24MB_MASK:
4016 return 24000000;
4017 case IEEE80211_OFDM_RATE_36MB_MASK:
4018 return 36000000;
4019 case IEEE80211_OFDM_RATE_48MB_MASK:
4020 return 48000000;
4021 case IEEE80211_OFDM_RATE_54MB_MASK:
4022 return 54000000;
4023 }
4024
4025 if (priv->ieee->mode == IEEE_B)
4026 return 11000000;
4027 else
4028 return 54000000;
4029 }
4030
4031 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4032 {
4033 u32 rate, len = sizeof(rate);
4034 int err;
4035
4036 if (!(priv->status & STATUS_ASSOCIATED))
4037 return 0;
4038
4039 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4040 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4041 &len);
4042 if (err) {
4043 IPW_DEBUG_INFO("failed querying ordinals.\n");
4044 return 0;
4045 }
4046 } else
4047 return ipw_get_max_rate(priv);
4048
4049 switch (rate) {
4050 case IPW_TX_RATE_1MB:
4051 return 1000000;
4052 case IPW_TX_RATE_2MB:
4053 return 2000000;
4054 case IPW_TX_RATE_5MB:
4055 return 5500000;
4056 case IPW_TX_RATE_6MB:
4057 return 6000000;
4058 case IPW_TX_RATE_9MB:
4059 return 9000000;
4060 case IPW_TX_RATE_11MB:
4061 return 11000000;
4062 case IPW_TX_RATE_12MB:
4063 return 12000000;
4064 case IPW_TX_RATE_18MB:
4065 return 18000000;
4066 case IPW_TX_RATE_24MB:
4067 return 24000000;
4068 case IPW_TX_RATE_36MB:
4069 return 36000000;
4070 case IPW_TX_RATE_48MB:
4071 return 48000000;
4072 case IPW_TX_RATE_54MB:
4073 return 54000000;
4074 }
4075
4076 return 0;
4077 }
4078
4079 #define IPW_STATS_INTERVAL (2 * HZ)
4080 static void ipw_gather_stats(struct ipw_priv *priv)
4081 {
4082 u32 rx_err, rx_err_delta, rx_packets_delta;
4083 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4084 u32 missed_beacons_percent, missed_beacons_delta;
4085 u32 quality = 0;
4086 u32 len = sizeof(u32);
4087 s16 rssi;
4088 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4089 rate_quality;
4090 u32 max_rate;
4091
4092 if (!(priv->status & STATUS_ASSOCIATED)) {
4093 priv->quality = 0;
4094 return;
4095 }
4096
4097 /* Update the statistics */
4098 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4099 &priv->missed_beacons, &len);
4100 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4101 priv->last_missed_beacons = priv->missed_beacons;
4102 if (priv->assoc_request.beacon_interval) {
4103 missed_beacons_percent = missed_beacons_delta *
4104 (HZ * priv->assoc_request.beacon_interval) /
4105 (IPW_STATS_INTERVAL * 10);
4106 } else {
4107 missed_beacons_percent = 0;
4108 }
4109 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4110
4111 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4112 rx_err_delta = rx_err - priv->last_rx_err;
4113 priv->last_rx_err = rx_err;
4114
4115 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4116 tx_failures_delta = tx_failures - priv->last_tx_failures;
4117 priv->last_tx_failures = tx_failures;
4118
4119 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4120 priv->last_rx_packets = priv->rx_packets;
4121
4122 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4123 priv->last_tx_packets = priv->tx_packets;
4124
4125 /* Calculate quality based on the following:
4126 *
4127 * Missed beacon: 100% = 0, 0% = 70% missed
4128 * Rate: 60% = 1Mbs, 100% = Max
4129 * Rx and Tx errors represent a straight % of total Rx/Tx
4130 * RSSI: 100% = > -50, 0% = < -80
4131 * Rx errors: 100% = 0, 0% = 50% missed
4132 *
4133 * The lowest computed quality is used.
4134 *
4135 */
4136 #define BEACON_THRESHOLD 5
4137 beacon_quality = 100 - missed_beacons_percent;
4138 if (beacon_quality < BEACON_THRESHOLD)
4139 beacon_quality = 0;
4140 else
4141 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4142 (100 - BEACON_THRESHOLD);
4143 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4144 beacon_quality, missed_beacons_percent);
4145
4146 priv->last_rate = ipw_get_current_rate(priv);
4147 max_rate = ipw_get_max_rate(priv);
4148 rate_quality = priv->last_rate * 40 / max_rate + 60;
4149 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4150 rate_quality, priv->last_rate / 1000000);
4151
4152 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4153 rx_quality = 100 - (rx_err_delta * 100) /
4154 (rx_packets_delta + rx_err_delta);
4155 else
4156 rx_quality = 100;
4157 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4158 rx_quality, rx_err_delta, rx_packets_delta);
4159
4160 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4161 tx_quality = 100 - (tx_failures_delta * 100) /
4162 (tx_packets_delta + tx_failures_delta);
4163 else
4164 tx_quality = 100;
4165 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4166 tx_quality, tx_failures_delta, tx_packets_delta);
4167
4168 rssi = priv->exp_avg_rssi;
4169 signal_quality =
4170 (100 *
4171 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4172 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4173 (priv->ieee->perfect_rssi - rssi) *
4174 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4175 62 * (priv->ieee->perfect_rssi - rssi))) /
4176 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4177 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4178 if (signal_quality > 100)
4179 signal_quality = 100;
4180 else if (signal_quality < 1)
4181 signal_quality = 0;
4182
4183 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4184 signal_quality, rssi);
4185
4186 quality = min(beacon_quality,
4187 min(rate_quality,
4188 min(tx_quality, min(rx_quality, signal_quality))));
4189 if (quality == beacon_quality)
4190 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4191 quality);
4192 if (quality == rate_quality)
4193 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4194 quality);
4195 if (quality == tx_quality)
4196 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4197 quality);
4198 if (quality == rx_quality)
4199 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4200 quality);
4201 if (quality == signal_quality)
4202 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4203 quality);
4204
4205 priv->quality = quality;
4206
4207 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4208 IPW_STATS_INTERVAL);
4209 }
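
/*
 * Worked example of the quality math above (not from the original
 * driver).  Every component is scaled to 0-100 and the lowest one wins.
 * Beacon quality passes the "percent of beacons missed" through a 5%
 * grace threshold; for 10% missed:
 *
 *	beacon_quality = 100 - 10 = 90
 *	normalized     = (90 - 5) * 100 / (100 - 5) = 89
 *
 * Signal quality is a downward-curving interpolation between
 * ieee->worst_rssi and ieee->perfect_rssi.  Assuming typical defaults of
 * about perfect_rssi = -20 dBm and worst_rssi = -85 dBm (an assumption,
 * as they are configured elsewhere), an averaged RSSI of -60 dBm gives:
 *
 *	range = -20 - (-85) = 65,  delta = -20 - (-60) = 40
 *	(100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
 *		= (422500 - 138200) / 4225 = 67,  i.e. about 67%
 */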
4210
4211 static void ipw_bg_gather_stats(void *data)
4212 {
4213 struct ipw_priv *priv = data;
4214 mutex_lock(&priv->mutex);
4215 ipw_gather_stats(data);
4216 mutex_unlock(&priv->mutex);
4217 }
4218
4219 /* Missed beacon behavior:
4220 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4221 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4222 * Above disassociate threshold, give up and stop scanning.
4223 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4224 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4225 int missed_count)
4226 {
4227 priv->notif_missed_beacons = missed_count;
4228
4229 if (missed_count > priv->disassociate_threshold &&
4230 priv->status & STATUS_ASSOCIATED) {
4231 /* If associated and we've hit the missed
4232 * beacon threshold, disassociate, turn
4233 * off roaming, and abort any active scans */
4234 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4235 IPW_DL_STATE | IPW_DL_ASSOC,
4236 "Missed beacon: %d - disassociate\n", missed_count);
4237 priv->status &= ~STATUS_ROAMING;
4238 if (priv->status & STATUS_SCANNING) {
4239 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4240 IPW_DL_STATE,
4241 "Aborting scan with missed beacon.\n");
4242 queue_work(priv->workqueue, &priv->abort_scan);
4243 }
4244
4245 queue_work(priv->workqueue, &priv->disassociate);
4246 return;
4247 }
4248
4249 if (priv->status & STATUS_ROAMING) {
4250 /* If we are currently roaming, then just
4251 * print a debug statement... */
4252 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4253 "Missed beacon: %d - roam in progress\n",
4254 missed_count);
4255 return;
4256 }
4257
4258 if (roaming &&
4259 (missed_count > priv->roaming_threshold &&
4260 missed_count <= priv->disassociate_threshold)) {
4261 /* If we are not already roaming, set the ROAM
4262 * bit in the status and kick off a scan.
4263 * This can happen several times before we reach
4264 * disassociate_threshold. */
4265 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4266 "Missed beacon: %d - initiate "
4267 "roaming\n", missed_count);
4268 if (!(priv->status & STATUS_ROAMING)) {
4269 priv->status |= STATUS_ROAMING;
4270 if (!(priv->status & STATUS_SCANNING))
4271 queue_work(priv->workqueue,
4272 &priv->request_scan);
4273 }
4274 return;
4275 }
4276
4277 if (priv->status & STATUS_SCANNING) {
4278 /* Stop scan to keep fw from getting
4279 * stuck (only if we aren't roaming --
4280 * otherwise we'll never scan more than 2 or 3
4281 * channels..) */
4282 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4283 "Aborting scan with missed beacon.\n");
4284 queue_work(priv->workqueue, &priv->abort_scan);
4285 }
4286
4287 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4288 }
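
/*
 * Illustrative sketch (hypothetical helper, not from the original
 * driver): the handler above effectively sorts the missed-beacon count
 * into three bands relative to the two thresholds, with roaming skipped
 * entirely when disassociate_threshold <= roaming_threshold.
 */
enum ipw_example_beacon_action { IPW_EX_WAIT, IPW_EX_ROAM, IPW_EX_DISASSOC };

static enum ipw_example_beacon_action
ipw_example_beacon_band(int missed, int roam_thresh, int disassoc_thresh)
{
	if (missed > disassoc_thresh)
		return IPW_EX_DISASSOC;	/* give up on this BSS */
	if (missed > roam_thresh && roam_thresh < disassoc_thresh)
		return IPW_EX_ROAM;	/* look for a better signal */
	return IPW_EX_WAIT;		/* below threshold: just wait */
}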
4289
4290 /**
4291 * Handle host notification packet.
4292 * Called from interrupt routine
4293 */
4294 static void ipw_rx_notification(struct ipw_priv *priv,
4295 struct ipw_rx_notification *notif)
4296 {
4297 notif->size = le16_to_cpu(notif->size);
4298
4299 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4300
4301 switch (notif->subtype) {
4302 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4303 struct notif_association *assoc = &notif->u.assoc;
4304
4305 switch (assoc->state) {
4306 case CMAS_ASSOCIATED:{
4307 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4308 IPW_DL_ASSOC,
4309 "associated: '%s' " MAC_FMT
4310 " \n",
4311 escape_essid(priv->essid,
4312 priv->essid_len),
4313 MAC_ARG(priv->bssid));
4314
4315 switch (priv->ieee->iw_mode) {
4316 case IW_MODE_INFRA:
4317 memcpy(priv->ieee->bssid,
4318 priv->bssid, ETH_ALEN);
4319 break;
4320
4321 case IW_MODE_ADHOC:
4322 memcpy(priv->ieee->bssid,
4323 priv->bssid, ETH_ALEN);
4324
4325 /* clear out the station table */
4326 priv->num_stations = 0;
4327
4328 IPW_DEBUG_ASSOC
4329 ("queueing adhoc check\n");
4330 queue_delayed_work(priv->
4331 workqueue,
4332 &priv->
4333 adhoc_check,
4334 priv->
4335 assoc_request.
4336 beacon_interval);
4337 break;
4338 }
4339
4340 priv->status &= ~STATUS_ASSOCIATING;
4341 priv->status |= STATUS_ASSOCIATED;
4342 queue_work(priv->workqueue,
4343 &priv->system_config);
4344
4345 #ifdef CONFIG_IPW2200_QOS
4346 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4347 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4348 if ((priv->status & STATUS_AUTH) &&
4349 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4350 == IEEE80211_STYPE_ASSOC_RESP)) {
4351 if ((sizeof
4352 (struct
4353 ieee80211_assoc_response)
4354 <= notif->size)
4355 && (notif->size <= 2314)) {
4356 struct
4357 ieee80211_rx_stats
4358 stats = {
4359 .len =
4360 notif->
4361 size - 1,
4362 };
4363
4364 IPW_DEBUG_QOS
4365 ("QoS Associate "
4366 "size %d\n",
4367 notif->size);
4368 ieee80211_rx_mgt(priv->
4369 ieee,
4370 (struct
4371 ieee80211_hdr_4addr
4372 *)
4373 &notif->u.raw, &stats);
4374 }
4375 }
4376 #endif
4377
4378 schedule_work(&priv->link_up);
4379
4380 break;
4381 }
4382
4383 case CMAS_AUTHENTICATED:{
4384 if (priv->
4385 status & (STATUS_ASSOCIATED |
4386 STATUS_AUTH)) {
4387 #ifdef CONFIG_IPW2200_DEBUG
4388 struct notif_authenticate *auth
4389 = &notif->u.auth;
4390 IPW_DEBUG(IPW_DL_NOTIF |
4391 IPW_DL_STATE |
4392 IPW_DL_ASSOC,
4393 "deauthenticated: '%s' "
4394 MAC_FMT
4395 ": (0x%04X) - %s \n",
4396 escape_essid(priv->
4397 essid,
4398 priv->
4399 essid_len),
4400 MAC_ARG(priv->bssid),
4401 ntohs(auth->status),
4402 ipw_get_status_code
4403 (ntohs
4404 (auth->status)));
4405 #endif
4406
4407 priv->status &=
4408 ~(STATUS_ASSOCIATING |
4409 STATUS_AUTH |
4410 STATUS_ASSOCIATED);
4411
4412 schedule_work(&priv->link_down);
4413 break;
4414 }
4415
4416 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4417 IPW_DL_ASSOC,
4418 "authenticated: '%s' " MAC_FMT
4419 "\n",
4420 escape_essid(priv->essid,
4421 priv->essid_len),
4422 MAC_ARG(priv->bssid));
4423 break;
4424 }
4425
4426 case CMAS_INIT:{
4427 if (priv->status & STATUS_AUTH) {
4428 struct
4429 ieee80211_assoc_response
4430 *resp;
4431 resp =
4432 (struct
4433 ieee80211_assoc_response
4434 *)&notif->u.raw;
4435 IPW_DEBUG(IPW_DL_NOTIF |
4436 IPW_DL_STATE |
4437 IPW_DL_ASSOC,
4438 "association failed (0x%04X): %s\n",
4439 ntohs(resp->status),
4440 ipw_get_status_code
4441 (ntohs
4442 (resp->status)));
4443 }
4444
4445 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4446 IPW_DL_ASSOC,
4447 "disassociated: '%s' " MAC_FMT
4448 " \n",
4449 escape_essid(priv->essid,
4450 priv->essid_len),
4451 MAC_ARG(priv->bssid));
4452
4453 priv->status &=
4454 ~(STATUS_DISASSOCIATING |
4455 STATUS_ASSOCIATING |
4456 STATUS_ASSOCIATED | STATUS_AUTH);
4457 if (priv->assoc_network
4458 && (priv->assoc_network->
4459 capability &
4460 WLAN_CAPABILITY_IBSS))
4461 ipw_remove_current_network
4462 (priv);
4463
4464 schedule_work(&priv->link_down);
4465
4466 break;
4467 }
4468
4469 case CMAS_RX_ASSOC_RESP:
4470 break;
4471
4472 default:
4473 IPW_ERROR("assoc: unknown (%d)\n",
4474 assoc->state);
4475 break;
4476 }
4477
4478 break;
4479 }
4480
4481 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4482 struct notif_authenticate *auth = &notif->u.auth;
4483 switch (auth->state) {
4484 case CMAS_AUTHENTICATED:
4485 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4486 "authenticated: '%s' " MAC_FMT " \n",
4487 escape_essid(priv->essid,
4488 priv->essid_len),
4489 MAC_ARG(priv->bssid));
4490 priv->status |= STATUS_AUTH;
4491 break;
4492
4493 case CMAS_INIT:
4494 if (priv->status & STATUS_AUTH) {
4495 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4496 IPW_DL_ASSOC,
4497 "authentication failed (0x%04X): %s\n",
4498 ntohs(auth->status),
4499 ipw_get_status_code(ntohs
4500 (auth->
4501 status)));
4502 }
4503 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4504 IPW_DL_ASSOC,
4505 "deauthenticated: '%s' " MAC_FMT "\n",
4506 escape_essid(priv->essid,
4507 priv->essid_len),
4508 MAC_ARG(priv->bssid));
4509
4510 priv->status &= ~(STATUS_ASSOCIATING |
4511 STATUS_AUTH |
4512 STATUS_ASSOCIATED);
4513
4514 schedule_work(&priv->link_down);
4515 break;
4516
4517 case CMAS_TX_AUTH_SEQ_1:
4518 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4519 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4520 break;
4521 case CMAS_RX_AUTH_SEQ_2:
4522 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4523 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4524 break;
4525 case CMAS_AUTH_SEQ_1_PASS:
4526 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4527 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4528 break;
4529 case CMAS_AUTH_SEQ_1_FAIL:
4530 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4531 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4532 break;
4533 case CMAS_TX_AUTH_SEQ_3:
4534 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4535 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4536 break;
4537 case CMAS_RX_AUTH_SEQ_4:
4538 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4539 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4540 break;
4541 case CMAS_AUTH_SEQ_2_PASS:
4542 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4543 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4544 break;
4545 case CMAS_AUTH_SEQ_2_FAIL:
4546 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4547 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4548 break;
4549 case CMAS_TX_ASSOC:
4550 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4551 IPW_DL_ASSOC, "TX_ASSOC\n");
4552 break;
4553 case CMAS_RX_ASSOC_RESP:
4554 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4555 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4556
4557 break;
4558 case CMAS_ASSOCIATED:
4559 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4560 IPW_DL_ASSOC, "ASSOCIATED\n");
4561 break;
4562 default:
4563 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4564 auth->state);
4565 break;
4566 }
4567 break;
4568 }
4569
4570 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4571 struct notif_channel_result *x =
4572 &notif->u.channel_result;
4573
4574 if (notif->size == sizeof(*x)) {
4575 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4576 x->channel_num);
4577 } else {
4578 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4579 "(should be %zd)\n",
4580 notif->size, sizeof(*x));
4581 }
4582 break;
4583 }
4584
4585 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4586 struct notif_scan_complete *x = &notif->u.scan_complete;
4587 if (notif->size == sizeof(*x)) {
4588 IPW_DEBUG_SCAN
4589 ("Scan completed: type %d, %d channels, "
4590 "%d status\n", x->scan_type,
4591 x->num_channels, x->status);
4592 } else {
4593 IPW_ERROR("Scan completed of wrong size %d "
4594 "(should be %zd)\n",
4595 notif->size, sizeof(*x));
4596 }
4597
4598 priv->status &=
4599 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4600
4601 wake_up_interruptible(&priv->wait_state);
4602 cancel_delayed_work(&priv->scan_check);
4603
4604 if (priv->status & STATUS_EXIT_PENDING)
4605 break;
4606
4607 priv->ieee->scans++;
4608
4609 #ifdef CONFIG_IPW2200_MONITOR
4610 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4611 priv->status |= STATUS_SCAN_FORCED;
4612 queue_work(priv->workqueue,
4613 &priv->request_scan);
4614 break;
4615 }
4616 priv->status &= ~STATUS_SCAN_FORCED;
4617 #endif /* CONFIG_IPW2200_MONITOR */
4618
4619 if (!(priv->status & (STATUS_ASSOCIATED |
4620 STATUS_ASSOCIATING |
4621 STATUS_ROAMING |
4622 STATUS_DISASSOCIATING)))
4623 queue_work(priv->workqueue, &priv->associate);
4624 else if (priv->status & STATUS_ROAMING) {
4625 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4626 /* If a scan completed and we are in roam mode, then
4627 * the scan that completed was the one requested as a
4628 * result of entering roam... so, schedule the
4629 * roam work */
4630 queue_work(priv->workqueue,
4631 &priv->roam);
4632 else
4633 /* Don't schedule if we aborted the scan */
4634 priv->status &= ~STATUS_ROAMING;
4635 } else if (priv->status & STATUS_SCAN_PENDING)
4636 queue_work(priv->workqueue,
4637 &priv->request_scan);
4638 else if (priv->config & CFG_BACKGROUND_SCAN
4639 && priv->status & STATUS_ASSOCIATED)
4640 queue_delayed_work(priv->workqueue,
4641 &priv->request_scan, HZ);
4642
4643 /* Send an empty event to user space.
4644 * We don't send the received data on the event because
4645 * it would require us to do complex transcoding, and
4646 * we want to minimise the work done in the irq handler.
4647 * Use a request to extract the data.
4648 * Also, we generate this event for any scan, regardless
4649 * of how the scan was initiated. User space can just
4650 * sync on periodic scan to get fresh data...
4651 * Jean II */
4652 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4653 union iwreq_data wrqu;
4654
4655 wrqu.data.length = 0;
4656 wrqu.data.flags = 0;
4657 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4658 &wrqu, NULL);
4659 }
4660 break;
4661 }
4662
4663 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4664 struct notif_frag_length *x = &notif->u.frag_len;
4665
4666 if (notif->size == sizeof(*x))
4667 IPW_ERROR("Frag length: %d\n",
4668 le16_to_cpu(x->frag_length));
4669 else
4670 IPW_ERROR("Frag length of wrong size %d "
4671 "(should be %zd)\n",
4672 notif->size, sizeof(*x));
4673 break;
4674 }
4675
4676 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4677 struct notif_link_deterioration *x =
4678 &notif->u.link_deterioration;
4679
4680 if (notif->size == sizeof(*x)) {
4681 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4682 "link deterioration: type %d, cnt %d\n",
4683 x->silence_notification_type,
4684 x->silence_count);
4685 memcpy(&priv->last_link_deterioration, x,
4686 sizeof(*x));
4687 } else {
4688 IPW_ERROR("Link Deterioration of wrong size %d "
4689 "(should be %zd)\n",
4690 notif->size, sizeof(*x));
4691 }
4692 break;
4693 }
4694
4695 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4696 IPW_ERROR("Dino config\n");
4697 if (priv->hcmd
4698 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4699 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4700
4701 break;
4702 }
4703
4704 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4705 struct notif_beacon_state *x = &notif->u.beacon_state;
4706 if (notif->size != sizeof(*x)) {
4707 IPW_ERROR
4708 ("Beacon state of wrong size %d (should "
4709 "be %zd)\n", notif->size, sizeof(*x));
4710 break;
4711 }
4712
4713 if (le32_to_cpu(x->state) ==
4714 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4715 ipw_handle_missed_beacon(priv,
4716 le32_to_cpu(x->
4717 number));
4718
4719 break;
4720 }
4721
4722 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4723 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4724 if (notif->size == sizeof(*x)) {
4725 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4726 "0x%02x station %d\n",
4727 x->key_state, x->security_type,
4728 x->station_index);
4729 break;
4730 }
4731
4732 IPW_ERROR
4733 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4734 notif->size, sizeof(*x));
4735 break;
4736 }
4737
4738 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4739 struct notif_calibration *x = &notif->u.calibration;
4740
4741 if (notif->size == sizeof(*x)) {
4742 memcpy(&priv->calib, x, sizeof(*x));
4743 IPW_DEBUG_INFO("TODO: Calibration\n");
4744 break;
4745 }
4746
4747 IPW_ERROR
4748 ("Calibration of wrong size %d (should be %zd)\n",
4749 notif->size, sizeof(*x));
4750 break;
4751 }
4752
4753 case HOST_NOTIFICATION_NOISE_STATS:{
4754 if (notif->size == sizeof(u32)) {
4755 priv->exp_avg_noise =
4756 exponential_average(priv->exp_avg_noise,
4757 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4758 DEPTH_NOISE);
4759 break;
4760 }
4761
4762 IPW_ERROR
4763 ("Noise stat is wrong size %d (should be %zd)\n",
4764 notif->size, sizeof(u32));
4765 break;
4766 }
4767
4768 default:
4769 IPW_DEBUG_NOTIF("Unknown notification: "
4770 "subtype=%d,flags=0x%2x,size=%d\n",
4771 notif->subtype, notif->flags, notif->size);
4772 }
4773 }
4774
4775 /**
4776 * Destroys all DMA structures and initialises them again
4777 *
4778 * @param priv
4779 * @return error code
4780 */
4781 static int ipw_queue_reset(struct ipw_priv *priv)
4782 {
4783 int rc = 0;
4784 /** @todo customize queue sizes */
4785 int nTx = 64, nTxCmd = 8;
4786 ipw_tx_queue_free(priv);
4787 /* Tx CMD queue */
4788 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4789 IPW_TX_CMD_QUEUE_READ_INDEX,
4790 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4791 IPW_TX_CMD_QUEUE_BD_BASE,
4792 IPW_TX_CMD_QUEUE_BD_SIZE);
4793 if (rc) {
4794 IPW_ERROR("Tx Cmd queue init failed\n");
4795 goto error;
4796 }
4797 /* Tx queue(s) */
4798 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4799 IPW_TX_QUEUE_0_READ_INDEX,
4800 IPW_TX_QUEUE_0_WRITE_INDEX,
4801 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4802 if (rc) {
4803 IPW_ERROR("Tx 0 queue init failed\n");
4804 goto error;
4805 }
4806 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4807 IPW_TX_QUEUE_1_READ_INDEX,
4808 IPW_TX_QUEUE_1_WRITE_INDEX,
4809 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4810 if (rc) {
4811 IPW_ERROR("Tx 1 queue init failed\n");
4812 goto error;
4813 }
4814 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4815 IPW_TX_QUEUE_2_READ_INDEX,
4816 IPW_TX_QUEUE_2_WRITE_INDEX,
4817 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4818 if (rc) {
4819 IPW_ERROR("Tx 2 queue init failed\n");
4820 goto error;
4821 }
4822 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4823 IPW_TX_QUEUE_3_READ_INDEX,
4824 IPW_TX_QUEUE_3_WRITE_INDEX,
4825 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4826 if (rc) {
4827 IPW_ERROR("Tx 3 queue init failed\n");
4828 goto error;
4829 }
4830 /* statistics */
4831 priv->rx_bufs_min = 0;
4832 priv->rx_pend_max = 0;
4833 return rc;
4834
4835 error:
4836 ipw_tx_queue_free(priv);
4837 return rc;
4838 }
4839
4840 /**
4841 * Reclaim Tx queue entries no more used by NIC.
4842 *
4843 * When the FW advances the 'R' index, all entries between the old and
4844 * new 'R' index need to be reclaimed. As a result, some free space
4845 * forms. If there is enough free space (> low mark), wake Tx queue.
4846 *
4847 * @note Need to protect against garbage in 'R' index
4848 * @param priv
4849 * @param txq
4850 * @param qindex
4851 * @return Number of used entries remaining in the queue
4852 */
4853 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4854 struct clx2_tx_queue *txq, int qindex)
4855 {
4856 u32 hw_tail;
4857 int used;
4858 struct clx2_queue *q = &txq->q;
4859
4860 hw_tail = ipw_read32(priv, q->reg_r);
4861 if (hw_tail >= q->n_bd) {
4862 IPW_ERROR
4863 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4864 hw_tail, q->n_bd);
4865 goto done;
4866 }
4867 for (; q->last_used != hw_tail;
4868 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4869 ipw_queue_tx_free_tfd(priv, txq);
4870 priv->tx_packets++;
4871 }
4872 done:
4873 if ((ipw_queue_space(q) > q->low_mark) &&
4874 (qindex >= 0) &&
4875 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4876 netif_wake_queue(priv->net_dev);
4877 used = q->first_empty - q->last_used;
4878 if (used < 0)
4879 used += q->n_bd;
4880
4881 return used;
4882 }
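
/*
 * Worked example of the wrap arithmetic above (not from the original
 * driver).  With n_bd = 64 descriptors, first_empty = 3 and
 * last_used = 60 (the write pointer has wrapped past the end):
 *
 *	used = 3 - 60 = -57  ->  -57 + 64 = 7 entries still outstanding
 *
 * The same modular step is what ipw_queue_inc_wrap() applies to
 * last_used while walking from the old read index up to hw_tail.
 */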
4883
4884 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4885 int len, int sync)
4886 {
4887 struct clx2_tx_queue *txq = &priv->txq_cmd;
4888 struct clx2_queue *q = &txq->q;
4889 struct tfd_frame *tfd;
4890
4891 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4892 IPW_ERROR("No space for Tx\n");
4893 return -EBUSY;
4894 }
4895
4896 tfd = &txq->bd[q->first_empty];
4897 txq->txb[q->first_empty] = NULL;
4898
4899 memset(tfd, 0, sizeof(*tfd));
4900 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4901 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4902 priv->hcmd_seq++;
4903 tfd->u.cmd.index = hcmd;
4904 tfd->u.cmd.length = len;
4905 memcpy(tfd->u.cmd.payload, buf, len);
4906 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4907 ipw_write32(priv, q->reg_w, q->first_empty);
4908 _ipw_read32(priv, 0x90);
4909
4910 return 0;
4911 }
4912
4913 /*
4914 * Rx theory of operation
4915 *
4916 * The host allocates 32 DMA target addresses and passes the host address
4917 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4918 * 0 to 31
4919 *
4920 * Rx Queue Indexes
4921 * The host/firmware share two index registers for managing the Rx buffers.
4922 *
4923 * The READ index maps to the first position that the firmware may be writing
4924 * to -- the driver can read up to (but not including) this position and get
4925 * good data.
4926 * The READ index is managed by the firmware once the card is enabled.
4927 *
4928 * The WRITE index maps to the last position the driver has read from -- the
4929 * position preceding WRITE is the last slot the firmware can place a packet.
4930 *
4931 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4932 * WRITE = READ.
4933 *
4934 * During initialization the host sets up the READ queue position to the first
4935 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4936 *
4937 * When the firmware places a packet in a buffer it will advance the READ index
4938 * and fire the RX interrupt. The driver can then query the READ index and
4939 * process as many packets as possible, moving the WRITE index forward as it
4940 * resets the Rx queue buffers with new memory.
4941 *
4942 * The management in the driver is as follows:
4943 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4944 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4945 * to replenish the ipw->rxq->rx_free.
4946 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4947 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4948 * 'processed' and 'read' driver indexes as well)
4949 * + A received packet is processed and handed to the kernel network stack,
4950 * detached from the ipw->rxq. The driver 'processed' index is updated.
4951 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4952 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4953 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4954 * were enough free buffers and RX_STALLED is set it is cleared.
4955 *
4956 *
4957 * Driver sequence:
4958 *
4959 * ipw_rx_queue_alloc() Allocates rx_free
4960 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4961 * ipw_rx_queue_restock
4962 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4963 * queue, updates firmware pointers, and updates
4964 * the WRITE index. If insufficient rx_free buffers
4965 * are available, schedules ipw_rx_queue_replenish
4966 *
4967 * -- enable interrupts --
4968 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4969 * READ INDEX, detaching the SKB from the pool.
4970 * Moves the packet buffer from queue to rx_used.
4971 * Calls ipw_rx_queue_restock to refill any empty
4972 * slots.
4973 * ...
4974 *
4975 */
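
/*
 * Illustrative sketch (hypothetical helpers, not from the original
 * driver): the empty/full convention described above, for the
 * RX_QUEUE_SIZE-entry RFD ring, can be expressed as:
 */
static inline int ipw_example_rx_ring_empty(u32 read, u32 write)
{
	/* WRITE == READ - 1 (modulo the ring size) means no good data */
	return write == (read + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE;
}

static inline int ipw_example_rx_ring_full(u32 read, u32 write)
{
	/* WRITE == READ means every slot is stocked for the firmware */
	return write == read;
}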
4976
4977 /*
4978 * If there are slots in the RX queue that need to be restocked,
4979 * and we have free pre-allocated buffers, fill the ranks as much
4980 * as we can pulling from rx_free.
4981 *
4982 * This moves the 'write' index forward to catch up with 'processed', and
4983 * also updates the memory address in the firmware to reference the new
4984 * target buffer.
4985 */
4986 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4987 {
4988 struct ipw_rx_queue *rxq = priv->rxq;
4989 struct list_head *element;
4990 struct ipw_rx_mem_buffer *rxb;
4991 unsigned long flags;
4992 int write;
4993
4994 spin_lock_irqsave(&rxq->lock, flags);
4995 write = rxq->write;
4996 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4997 element = rxq->rx_free.next;
4998 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4999 list_del(element);
5000
5001 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5002 rxb->dma_addr);
5003 rxq->queue[rxq->write] = rxb;
5004 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5005 rxq->free_count--;
5006 }
5007 spin_unlock_irqrestore(&rxq->lock, flags);
5008
5009 /* If the pre-allocated buffer pool is dropping low, schedule to
5010 * refill it */
5011 if (rxq->free_count <= RX_LOW_WATERMARK)
5012 queue_work(priv->workqueue, &priv->rx_replenish);
5013
5014 /* If we've added more space for the firmware to place data, tell it */
5015 if (write != rxq->write)
5016 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5017 }
5018
5019 /*
5020 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5021 * Also restock the Rx queue via ipw_rx_queue_restock.
5022 *
5023 * This is called as a scheduled work item (except during initialization)
5024 */
5025 static void ipw_rx_queue_replenish(void *data)
5026 {
5027 struct ipw_priv *priv = data;
5028 struct ipw_rx_queue *rxq = priv->rxq;
5029 struct list_head *element;
5030 struct ipw_rx_mem_buffer *rxb;
5031 unsigned long flags;
5032
5033 spin_lock_irqsave(&rxq->lock, flags);
5034 while (!list_empty(&rxq->rx_used)) {
5035 element = rxq->rx_used.next;
5036 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5037 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5038 if (!rxb->skb) {
5039 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5040 priv->net_dev->name);
5041 /* We don't reschedule replenish work here -- we will
5042 * call the restock method and if it still needs
5043 * more buffers it will schedule replenish */
5044 break;
5045 }
5046 list_del(element);
5047
5048 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
5049 rxb->dma_addr =
5050 pci_map_single(priv->pci_dev, rxb->skb->data,
5051 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5052
5053 list_add_tail(&rxb->list, &rxq->rx_free);
5054 rxq->free_count++;
5055 }
5056 spin_unlock_irqrestore(&rxq->lock, flags);
5057
5058 ipw_rx_queue_restock(priv);
5059 }
5060
5061 static void ipw_bg_rx_queue_replenish(void *data)
5062 {
5063 struct ipw_priv *priv = data;
5064 mutex_lock(&priv->mutex);
5065 ipw_rx_queue_replenish(data);
5066 mutex_unlock(&priv->mutex);
5067 }
5068
5069 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5070 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5071 * This free routine walks the list of POOL entries and if the SKB is set to
5072 * non-NULL it is unmapped and freed.
5073 */
5074 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5075 {
5076 int i;
5077
5078 if (!rxq)
5079 return;
5080
5081 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5082 if (rxq->pool[i].skb != NULL) {
5083 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5084 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5085 dev_kfree_skb(rxq->pool[i].skb);
5086 }
5087 }
5088
5089 kfree(rxq);
5090 }
5091
5092 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5093 {
5094 struct ipw_rx_queue *rxq;
5095 int i;
5096
5097 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5098 if (unlikely(!rxq)) {
5099 IPW_ERROR("memory allocation failed\n");
5100 return NULL;
5101 }
5102 spin_lock_init(&rxq->lock);
5103 INIT_LIST_HEAD(&rxq->rx_free);
5104 INIT_LIST_HEAD(&rxq->rx_used);
5105
5106 /* Fill the rx_used queue with _all_ of the Rx buffers */
5107 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5108 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5109
5110 /* Set us up so that we have processed and used all buffers, but have
5111 * not restocked the Rx queue with fresh buffers */
5112 rxq->read = rxq->write = 0;
5113 rxq->processed = RX_QUEUE_SIZE - 1;
5114 rxq->free_count = 0;
5115
5116 return rxq;
5117 }
5118
5119 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5120 {
5121 rate &= ~IEEE80211_BASIC_RATE_MASK;
5122 if (ieee_mode == IEEE_A) {
5123 switch (rate) {
5124 case IEEE80211_OFDM_RATE_6MB:
5125 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5126 1 : 0;
5127 case IEEE80211_OFDM_RATE_9MB:
5128 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5129 1 : 0;
5130 case IEEE80211_OFDM_RATE_12MB:
5131 return priv->
5132 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5133 case IEEE80211_OFDM_RATE_18MB:
5134 return priv->
5135 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5136 case IEEE80211_OFDM_RATE_24MB:
5137 return priv->
5138 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5139 case IEEE80211_OFDM_RATE_36MB:
5140 return priv->
5141 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5142 case IEEE80211_OFDM_RATE_48MB:
5143 return priv->
5144 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5145 case IEEE80211_OFDM_RATE_54MB:
5146 return priv->
5147 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5148 default:
5149 return 0;
5150 }
5151 }
5152
5153 /* B and G mixed */
5154 switch (rate) {
5155 case IEEE80211_CCK_RATE_1MB:
5156 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5157 case IEEE80211_CCK_RATE_2MB:
5158 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5159 case IEEE80211_CCK_RATE_5MB:
5160 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5161 case IEEE80211_CCK_RATE_11MB:
5162 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5163 }
5164
5165 /* If we are limited to B modulations, bail at this point */
5166 if (ieee_mode == IEEE_B)
5167 return 0;
5168
5169 /* G */
5170 switch (rate) {
5171 case IEEE80211_OFDM_RATE_6MB:
5172 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5173 case IEEE80211_OFDM_RATE_9MB:
5174 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5175 case IEEE80211_OFDM_RATE_12MB:
5176 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5177 case IEEE80211_OFDM_RATE_18MB:
5178 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5179 case IEEE80211_OFDM_RATE_24MB:
5180 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5181 case IEEE80211_OFDM_RATE_36MB:
5182 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5183 case IEEE80211_OFDM_RATE_48MB:
5184 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5185 case IEEE80211_OFDM_RATE_54MB:
5186 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5187 }
5188
5189 return 0;
5190 }
5191
5192 static int ipw_compatible_rates(struct ipw_priv *priv,
5193 const struct ieee80211_network *network,
5194 struct ipw_supported_rates *rates)
5195 {
5196 int num_rates, i;
5197
5198 memset(rates, 0, sizeof(*rates));
5199 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5200 rates->num_rates = 0;
5201 for (i = 0; i < num_rates; i++) {
5202 if (!ipw_is_rate_in_mask(priv, network->mode,
5203 network->rates[i])) {
5204
5205 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5206 IPW_DEBUG_SCAN("Adding masked mandatory "
5207 "rate %02X\n",
5208 network->rates[i]);
5209 rates->supported_rates[rates->num_rates++] =
5210 network->rates[i];
5211 continue;
5212 }
5213
5214 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5215 network->rates[i], priv->rates_mask);
5216 continue;
5217 }
5218
5219 rates->supported_rates[rates->num_rates++] = network->rates[i];
5220 }
5221
5222 num_rates = min(network->rates_ex_len,
5223 (u8) (IPW_MAX_RATES - num_rates));
5224 for (i = 0; i < num_rates; i++) {
5225 if (!ipw_is_rate_in_mask(priv, network->mode,
5226 network->rates_ex[i])) {
5227 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5228 IPW_DEBUG_SCAN("Adding masked mandatory "
5229 "rate %02X\n",
5230 network->rates_ex[i]);
5231 rates->supported_rates[rates->num_rates++] =
5232 network->rates_ex[i];
5233 continue;
5234 }
5235
5236 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5237 network->rates_ex[i], priv->rates_mask);
5238 continue;
5239 }
5240
5241 rates->supported_rates[rates->num_rates++] =
5242 network->rates_ex[i];
5243 }
5244
5245 return 1;
5246 }
5247
5248 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5249 const struct ipw_supported_rates *src)
5250 {
5251 u8 i;
5252 for (i = 0; i < src->num_rates; i++)
5253 dest->supported_rates[i] = src->supported_rates[i];
5254 dest->num_rates = src->num_rates;
5255 }
5256
5257 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5258 * mask should ever be used -- right now all callers to add the scan rates are
5259 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5260 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5261 u8 modulation, u32 rate_mask)
5262 {
5263 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5264 IEEE80211_BASIC_RATE_MASK : 0;
5265
5266 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5267 rates->supported_rates[rates->num_rates++] =
5268 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5269
5270 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5271 rates->supported_rates[rates->num_rates++] =
5272 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5273
5274 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5275 rates->supported_rates[rates->num_rates++] = basic_mask |
5276 IEEE80211_CCK_RATE_5MB;
5277
5278 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5279 rates->supported_rates[rates->num_rates++] = basic_mask |
5280 IEEE80211_CCK_RATE_11MB;
5281 }
5282
5283 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5284 u8 modulation, u32 rate_mask)
5285 {
5286 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5287 IEEE80211_BASIC_RATE_MASK : 0;
5288
5289 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5290 rates->supported_rates[rates->num_rates++] = basic_mask |
5291 IEEE80211_OFDM_RATE_6MB;
5292
5293 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5294 rates->supported_rates[rates->num_rates++] =
5295 IEEE80211_OFDM_RATE_9MB;
5296
5297 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5298 rates->supported_rates[rates->num_rates++] = basic_mask |
5299 IEEE80211_OFDM_RATE_12MB;
5300
5301 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5302 rates->supported_rates[rates->num_rates++] =
5303 IEEE80211_OFDM_RATE_18MB;
5304
5305 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5306 rates->supported_rates[rates->num_rates++] = basic_mask |
5307 IEEE80211_OFDM_RATE_24MB;
5308
5309 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5310 rates->supported_rates[rates->num_rates++] =
5311 IEEE80211_OFDM_RATE_36MB;
5312
5313 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5314 rates->supported_rates[rates->num_rates++] =
5315 IEEE80211_OFDM_RATE_48MB;
5316
5317 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5318 rates->supported_rates[rates->num_rates++] =
5319 IEEE80211_OFDM_RATE_54MB;
5320 }
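
/*
 * Illustrative sketch (hypothetical helper, not from the original
 * driver): the two builders above are intended to be used together,
 * appending first the CCK set and then the OFDM set filtered by the
 * configured mask.  The modulation constants are the ieee80211 layer's
 * IEEE80211_CCK_MODULATION / IEEE80211_OFDM_MODULATION (assumed here).
 */
static void ipw_example_build_scan_rates(struct ipw_supported_rates *rates,
					 u32 rate_mask)
{
	rates->num_rates = 0;
	ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION, rate_mask);
	ipw_add_ofdm_scan_rates(rates, IEEE80211_OFDM_MODULATION, rate_mask);
}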
5321
5322 struct ipw_network_match {
5323 struct ieee80211_network *network;
5324 struct ipw_supported_rates rates;
5325 };
5326
5327 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5328 struct ipw_network_match *match,
5329 struct ieee80211_network *network,
5330 int roaming)
5331 {
5332 struct ipw_supported_rates rates;
5333
5334 /* Verify that this network's capability is compatible with the
5335 * current mode (AdHoc or Infrastructure) */
5336 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5337 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5338 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5339 "capability mismatch.\n",
5340 escape_essid(network->ssid, network->ssid_len),
5341 MAC_ARG(network->bssid));
5342 return 0;
5343 }
5344
5345 /* If we do not have an ESSID for this AP, we can not associate with
5346 * it */
5347 if (network->flags & NETWORK_EMPTY_ESSID) {
5348 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5349 "because of hidden ESSID.\n",
5350 escape_essid(network->ssid, network->ssid_len),
5351 MAC_ARG(network->bssid));
5352 return 0;
5353 }
5354
5355 if (unlikely(roaming)) {
5356 /* If we are roaming, then check whether this is a valid
5357 * network to try to roam to */
5358 if ((network->ssid_len != match->network->ssid_len) ||
5359 memcmp(network->ssid, match->network->ssid,
5360 network->ssid_len)) {
5361 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5362 "because of non-network ESSID.\n",
5363 escape_essid(network->ssid,
5364 network->ssid_len),
5365 MAC_ARG(network->bssid));
5366 return 0;
5367 }
5368 } else {
5369 /* If an ESSID has been configured then compare the broadcast
5370 * ESSID to ours */
5371 if ((priv->config & CFG_STATIC_ESSID) &&
5372 ((network->ssid_len != priv->essid_len) ||
5373 memcmp(network->ssid, priv->essid,
5374 min(network->ssid_len, priv->essid_len)))) {
5375 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5376
5377 strncpy(escaped,
5378 escape_essid(network->ssid, network->ssid_len),
5379 sizeof(escaped));
5380 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5381 "because of ESSID mismatch: '%s'.\n",
5382 escaped, MAC_ARG(network->bssid),
5383 escape_essid(priv->essid,
5384 priv->essid_len));
5385 return 0;
5386 }
5387 }
5388
5389 /* If the current match is preferable to this network based on the
5390 * TSF timestamp comparison below, don't bother testing everything else. */
5391
5392 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5393 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5394 "current network.\n",
5395 escape_essid(match->network->ssid,
5396 match->network->ssid_len));
5397 return 0;
5398 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5399 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5400 "current network.\n",
5401 escape_essid(match->network->ssid,
5402 match->network->ssid_len));
5403 return 0;
5404 }
5405
5406 /* Now go through and see if the requested network is valid... */
5407 if (priv->ieee->scan_age != 0 &&
5408 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5409 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5410 "because of age: %ums.\n",
5411 escape_essid(network->ssid, network->ssid_len),
5412 MAC_ARG(network->bssid),
5413 jiffies_to_msecs(jiffies -
5414 network->last_scanned));
5415 return 0;
5416 }
5417
5418 if ((priv->config & CFG_STATIC_CHANNEL) &&
5419 (network->channel != priv->channel)) {
5420 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5421 "because of channel mismatch: %d != %d.\n",
5422 escape_essid(network->ssid, network->ssid_len),
5423 MAC_ARG(network->bssid),
5424 network->channel, priv->channel);
5425 return 0;
5426 }
5427
5428 /* Verify privacy compatibility */
5429 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5430 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5431 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5432 "because of privacy mismatch: %s != %s.\n",
5433 escape_essid(network->ssid, network->ssid_len),
5434 MAC_ARG(network->bssid),
5435 priv->
5436 capability & CAP_PRIVACY_ON ? "on" : "off",
5437 network->
5438 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5439 "off");
5440 return 0;
5441 }
5442
5443 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5444 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5445 "because of the same BSSID match: " MAC_FMT
5446 ".\n", escape_essid(network->ssid,
5447 network->ssid_len),
5448 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5449 return 0;
5450 }
5451
5452 /* Filter out any incompatible freq / mode combinations */
5453 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5454 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5455 "because of invalid frequency/mode "
5456 "combination.\n",
5457 escape_essid(network->ssid, network->ssid_len),
5458 MAC_ARG(network->bssid));
5459 return 0;
5460 }
5461
5462 /* Ensure that the rates supported by the driver are compatible with
5463 * this AP, including verification of basic rates (mandatory) */
5464 if (!ipw_compatible_rates(priv, network, &rates)) {
5465 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5466 "because configured rate mask excludes "
5467 "AP mandatory rate.\n",
5468 escape_essid(network->ssid, network->ssid_len),
5469 MAC_ARG(network->bssid));
5470 return 0;
5471 }
5472
5473 if (rates.num_rates == 0) {
5474 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5475 "because of no compatible rates.\n",
5476 escape_essid(network->ssid, network->ssid_len),
5477 MAC_ARG(network->bssid));
5478 return 0;
5479 }
5480
5481 /* TODO: Perform any further minimal comparative tests. We do not
5482 * want to put too much policy logic here; intelligent scan selection
5483 * should occur within a generic IEEE 802.11 user space tool. */
5484
5485 /* Set up 'new' AP to this network */
5486 ipw_copy_rates(&match->rates, &rates);
5487 match->network = network;
5488 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5489 escape_essid(network->ssid, network->ssid_len),
5490 MAC_ARG(network->bssid));
5491
5492 return 1;
5493 }
5494
5495 static void ipw_merge_adhoc_network(void *data)
5496 {
5497 struct ipw_priv *priv = data;
5498 struct ieee80211_network *network = NULL;
5499 struct ipw_network_match match = {
5500 .network = priv->assoc_network
5501 };
5502
5503 if ((priv->status & STATUS_ASSOCIATED) &&
5504 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5505 /* First pass through ROAM process -- look for a better
5506 * network */
5507 unsigned long flags;
5508
5509 spin_lock_irqsave(&priv->ieee->lock, flags);
5510 list_for_each_entry(network, &priv->ieee->network_list, list) {
5511 if (network != priv->assoc_network)
5512 ipw_find_adhoc_network(priv, &match, network,
5513 1);
5514 }
5515 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5516
5517 if (match.network == priv->assoc_network) {
5518 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5519 "merge to.\n");
5520 return;
5521 }
5522
5523 mutex_lock(&priv->mutex);
5524 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5525 IPW_DEBUG_MERGE("remove network %s\n",
5526 escape_essid(priv->essid,
5527 priv->essid_len));
5528 ipw_remove_current_network(priv);
5529 }
5530
5531 ipw_disassociate(priv);
5532 priv->assoc_network = match.network;
5533 mutex_unlock(&priv->mutex);
5534 return;
5535 }
5536 }
5537
5538 static int ipw_best_network(struct ipw_priv *priv,
5539 struct ipw_network_match *match,
5540 struct ieee80211_network *network, int roaming)
5541 {
5542 struct ipw_supported_rates rates;
5543
5544 /* Verify that this network's capability is compatible with the
5545 * current mode (AdHoc or Infrastructure) */
5546 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5547 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5548 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5549 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5550 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5551 "capability mismatch.\n",
5552 escape_essid(network->ssid, network->ssid_len),
5553 MAC_ARG(network->bssid));
5554 return 0;
5555 }
5556
5557 /* If we do not have an ESSID for this AP, we can not associate with
5558 * it */
5559 if (network->flags & NETWORK_EMPTY_ESSID) {
5560 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5561 "because of hidden ESSID.\n",
5562 escape_essid(network->ssid, network->ssid_len),
5563 MAC_ARG(network->bssid));
5564 return 0;
5565 }
5566
5567 if (unlikely(roaming)) {
5568 /* If we are roaming, then check whether this is a valid
5569 * network to try to roam to */
5570 if ((network->ssid_len != match->network->ssid_len) ||
5571 memcmp(network->ssid, match->network->ssid,
5572 network->ssid_len)) {
5573 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5574 "because of non-network ESSID.\n",
5575 escape_essid(network->ssid,
5576 network->ssid_len),
5577 MAC_ARG(network->bssid));
5578 return 0;
5579 }
5580 } else {
5581 /* If an ESSID has been configured then compare the broadcast
5582 * ESSID to ours */
5583 if ((priv->config & CFG_STATIC_ESSID) &&
5584 ((network->ssid_len != priv->essid_len) ||
5585 memcmp(network->ssid, priv->essid,
5586 min(network->ssid_len, priv->essid_len)))) {
5587 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5588 strncpy(escaped,
5589 escape_essid(network->ssid, network->ssid_len),
5590 sizeof(escaped));
5591 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5592 "because of ESSID mismatch: '%s'.\n",
5593 escaped, MAC_ARG(network->bssid),
5594 escape_essid(priv->essid,
5595 priv->essid_len));
5596 return 0;
5597 }
5598 }
5599
5600 /* If the old network rate is better than this one, don't bother
5601 * testing everything else. */
5602 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5603 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5604 strncpy(escaped,
5605 escape_essid(network->ssid, network->ssid_len),
5606 sizeof(escaped));
5607 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5608 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5609 escaped, MAC_ARG(network->bssid),
5610 escape_essid(match->network->ssid,
5611 match->network->ssid_len),
5612 MAC_ARG(match->network->bssid));
5613 return 0;
5614 }
5615
5616 /* If this network has already had an association attempt within the
5617 * last 3 seconds, do not try and associate again... */
5618 if (network->last_associate &&
5619 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5620 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5621 "because of storming (%ums since last "
5622 "assoc attempt).\n",
5623 escape_essid(network->ssid, network->ssid_len),
5624 MAC_ARG(network->bssid),
5625 jiffies_to_msecs(jiffies -
5626 network->last_associate));
5627 return 0;
5628 }
5629
5630 /* Now go through and see if the requested network is valid... */
5631 if (priv->ieee->scan_age != 0 &&
5632 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5633 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5634 "because of age: %ums.\n",
5635 escape_essid(network->ssid, network->ssid_len),
5636 MAC_ARG(network->bssid),
5637 jiffies_to_msecs(jiffies -
5638 network->last_scanned));
5639 return 0;
5640 }
5641
5642 if ((priv->config & CFG_STATIC_CHANNEL) &&
5643 (network->channel != priv->channel)) {
5644 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5645 "because of channel mismatch: %d != %d.\n",
5646 escape_essid(network->ssid, network->ssid_len),
5647 MAC_ARG(network->bssid),
5648 network->channel, priv->channel);
5649 return 0;
5650 }
5651
5652 /* Verify privacy compatibility */
5653 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5654 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5655 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5656 "because of privacy mismatch: %s != %s.\n",
5657 escape_essid(network->ssid, network->ssid_len),
5658 MAC_ARG(network->bssid),
5659 priv->capability & CAP_PRIVACY_ON ? "on" :
5660 "off",
5661 network->capability &
5662 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5663 return 0;
5664 }
5665
5666 if ((priv->config & CFG_STATIC_BSSID) &&
5667 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5668 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5669 "because of BSSID mismatch: " MAC_FMT ".\n",
5670 escape_essid(network->ssid, network->ssid_len),
5671 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5672 return 0;
5673 }
5674
5675 /* Filter out any incompatible freq / mode combinations */
5676 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5677 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5678 "because of invalid frequency/mode "
5679 "combination.\n",
5680 escape_essid(network->ssid, network->ssid_len),
5681 MAC_ARG(network->bssid));
5682 return 0;
5683 }
5684
5685 /* Filter out invalid channel in current GEO */
5686 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5687 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5688 "because of invalid channel in current GEO\n",
5689 escape_essid(network->ssid, network->ssid_len),
5690 MAC_ARG(network->bssid));
5691 return 0;
5692 }
5693
5694 /* Ensure that the rates supported by the driver are compatible with
5695 * this AP, including verification of basic rates (mandatory) */
5696 if (!ipw_compatible_rates(priv, network, &rates)) {
5697 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5698 "because configured rate mask excludes "
5699 "AP mandatory rate.\n",
5700 escape_essid(network->ssid, network->ssid_len),
5701 MAC_ARG(network->bssid));
5702 return 0;
5703 }
5704
5705 if (rates.num_rates == 0) {
5706 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5707 "because of no compatible rates.\n",
5708 escape_essid(network->ssid, network->ssid_len),
5709 MAC_ARG(network->bssid));
5710 return 0;
5711 }
5712
5713 /* TODO: Perform any further minimal comparative tests. We do not
5714 * want to put too much policy logic here; intelligent scan selection
5715 * should occur within a generic IEEE 802.11 user space tool. */
5716
5717 /* Set up 'new' AP to this network */
5718 ipw_copy_rates(&match->rates, &rates);
5719 match->network = network;
5720
5721 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5722 escape_essid(network->ssid, network->ssid_len),
5723 MAC_ARG(network->bssid));
5724
5725 return 1;
5726 }
5727
5728 static void ipw_adhoc_create(struct ipw_priv *priv,
5729 struct ieee80211_network *network)
5730 {
5731 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5732 int i;
5733
5734 /*
5735 * For the purposes of scanning, we can set our wireless mode
5736 * to trigger scans across combinations of bands, but when it
5737 * comes to creating a new ad-hoc network, we have to tell the FW
5738 * exactly which band to use.
5739 *
5740 * We also have the possibility of an invalid channel for the
5741 * chosen band. Attempting to create a new ad-hoc network
5742 * with an invalid channel for wireless mode will trigger a
5743 * FW fatal error.
5744 *
5745 */
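/*
 * For example: with priv->channel = 36 (a 5.2GHz channel, assuming it
 * is valid in the current geography) the IEEE80211_52GHZ_BAND case
 * below forces IEEE_A mode, while with priv->channel = 6 the
 * IEEE80211_24GHZ_BAND case picks IEEE_G when the card supports it and
 * IEEE_B otherwise.  In either case a passive-only channel is replaced
 * by the first channel of that band's geography table.
 */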
5746 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5747 case IEEE80211_52GHZ_BAND:
5748 network->mode = IEEE_A;
5749 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5750 BUG_ON(i == -1);
5751 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5752 IPW_WARNING("Overriding invalid channel\n");
5753 priv->channel = geo->a[0].channel;
5754 }
5755 break;
5756
5757 case IEEE80211_24GHZ_BAND:
5758 if (priv->ieee->mode & IEEE_G)
5759 network->mode = IEEE_G;
5760 else
5761 network->mode = IEEE_B;
5762 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5763 BUG_ON(i == -1);
5764 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5765 IPW_WARNING("Overriding invalid channel\n");
5766 priv->channel = geo->bg[0].channel;
5767 }
5768 break;
5769
5770 default:
5771 IPW_WARNING("Overriding invalid channel\n");
5772 if (priv->ieee->mode & IEEE_A) {
5773 network->mode = IEEE_A;
5774 priv->channel = geo->a[0].channel;
5775 } else if (priv->ieee->mode & IEEE_G) {
5776 network->mode = IEEE_G;
5777 priv->channel = geo->bg[0].channel;
5778 } else {
5779 network->mode = IEEE_B;
5780 priv->channel = geo->bg[0].channel;
5781 }
5782 break;
5783 }
5784
5785 network->channel = priv->channel;
5786 priv->config |= CFG_ADHOC_PERSIST;
5787 ipw_create_bssid(priv, network->bssid);
5788 network->ssid_len = priv->essid_len;
5789 memcpy(network->ssid, priv->essid, priv->essid_len);
5790 memset(&network->stats, 0, sizeof(network->stats));
5791 network->capability = WLAN_CAPABILITY_IBSS;
5792 if (!(priv->config & CFG_PREAMBLE_LONG))
5793 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5794 if (priv->capability & CAP_PRIVACY_ON)
5795 network->capability |= WLAN_CAPABILITY_PRIVACY;
5796 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5797 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5798 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5799 memcpy(network->rates_ex,
5800 &priv->rates.supported_rates[network->rates_len],
5801 network->rates_ex_len);
5802 network->last_scanned = 0;
5803 network->flags = 0;
5804 network->last_associate = 0;
5805 network->time_stamp[0] = 0;
5806 network->time_stamp[1] = 0;
5807 network->beacon_interval = 100; /* Default */
5808 network->listen_interval = 10; /* Default */
5809 network->atim_window = 0; /* Default */
5810 network->wpa_ie_len = 0;
5811 network->rsn_ie_len = 0;
5812 }
5813
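/*
 * Download the pairwise/group temporal key in slot 'index' to the
 * firmware for hardware TKIP/CCMP use.  Nothing is sent if the slot is
 * not marked valid in priv->ieee->sec.flags.
 */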
5814 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5815 {
5816 struct ipw_tgi_tx_key key;
5817
5818 if (!(priv->ieee->sec.flags & (1 << index)))
5819 return;
5820
5821 key.key_id = index;
5822 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5823 key.security_type = type;
5824 key.station_index = 0; /* always 0 for BSS */
5825 key.flags = 0;
5826 /* 0 for new key; previous value of counter (after fatal error) */
5827 key.tx_counter[0] = 0;
5828 key.tx_counter[1] = 0;
5829
5830 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5831 }
5832
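/*
 * Push each configured static key slot (0-3) to the firmware via the
 * DINO WEP-key command; 'type' is OR'd into the key index to select
 * the security type.  Slots without a configured key are skipped.
 */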
5833 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5834 {
5835 struct ipw_wep_key key;
5836 int i;
5837
5838 key.cmd_id = DINO_CMD_WEP_KEY;
5839 key.seq_num = 0;
5840
5841 /* Note: AES keys cannot be set multiple times.
5842 * Only set them the first time. */
5843 for (i = 0; i < 4; i++) {
5844 key.key_index = i | type;
5845 if (!(priv->ieee->sec.flags & (1 << i))) {
5846 key.key_size = 0;
5847 continue;
5848 }
5849
5850 key.key_size = priv->ieee->sec.key_sizes[i];
5851 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5852
5853 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5854 }
5855 }
5856
5857 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5858 {
5859 if (priv->ieee->host_encrypt)
5860 return;
5861
5862 switch (level) {
5863 case SEC_LEVEL_3:
5864 priv->sys_config.disable_unicast_decryption = 0;
5865 priv->ieee->host_decrypt = 0;
5866 break;
5867 case SEC_LEVEL_2:
5868 priv->sys_config.disable_unicast_decryption = 1;
5869 priv->ieee->host_decrypt = 1;
5870 break;
5871 case SEC_LEVEL_1:
5872 priv->sys_config.disable_unicast_decryption = 0;
5873 priv->ieee->host_decrypt = 0;
5874 break;
5875 case SEC_LEVEL_0:
5876 priv->sys_config.disable_unicast_decryption = 1;
5877 break;
5878 default:
5879 break;
5880 }
5881 }
5882
5883 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5884 {
5885 if (priv->ieee->host_encrypt)
5886 return;
5887
5888 switch (level) {
5889 case SEC_LEVEL_3:
5890 priv->sys_config.disable_multicast_decryption = 0;
5891 break;
5892 case SEC_LEVEL_2:
5893 priv->sys_config.disable_multicast_decryption = 1;
5894 break;
5895 case SEC_LEVEL_1:
5896 priv->sys_config.disable_multicast_decryption = 0;
5897 break;
5898 case SEC_LEVEL_0:
5899 priv->sys_config.disable_multicast_decryption = 1;
5900 break;
5901 default:
5902 break;
5903 }
5904 }
5905
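/*
 * Program the firmware keys for the negotiated security level: CCMP
 * (level 3) and TKIP (level 2) download the active temporal key, with
 * level 3 also downloading the group keys when multicast decryption is
 * done in hardware; WEP (level 1) downloads the static key slots and
 * enables hardware decryption of unicast and multicast frames.
 */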
5906 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5907 {
5908 switch (priv->ieee->sec.level) {
5909 case SEC_LEVEL_3:
5910 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5911 ipw_send_tgi_tx_key(priv,
5912 DCT_FLAG_EXT_SECURITY_CCM,
5913 priv->ieee->sec.active_key);
5914
5915 if (!priv->ieee->host_mc_decrypt)
5916 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5917 break;
5918 case SEC_LEVEL_2:
5919 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5920 ipw_send_tgi_tx_key(priv,
5921 DCT_FLAG_EXT_SECURITY_TKIP,
5922 priv->ieee->sec.active_key);
5923 break;
5924 case SEC_LEVEL_1:
5925 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5926 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5927 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5928 break;
5929 case SEC_LEVEL_0:
5930 default:
5931 break;
5932 }
5933 }
5934
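/*
 * Periodic ad-hoc watchdog: once the number of missed beacons exceeds
 * the disassociation threshold (and the IBSS was not created locally
 * as a persistent network) the current network is dropped; otherwise
 * the check is re-queued based on the beacon interval.
 */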
5935 static void ipw_adhoc_check(void *data)
5936 {
5937 struct ipw_priv *priv = data;
5938
5939 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5940 !(priv->config & CFG_ADHOC_PERSIST)) {
5941 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5942 IPW_DL_STATE | IPW_DL_ASSOC,
5943 "Missed beacon: %d - disassociate\n",
5944 priv->missed_adhoc_beacons);
5945 ipw_remove_current_network(priv);
5946 ipw_disassociate(priv);
5947 return;
5948 }
5949
5950 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5951 priv->assoc_request.beacon_interval);
5952 }
5953
5954 static void ipw_bg_adhoc_check(void *data)
5955 {
5956 struct ipw_priv *priv = data;
5957 mutex_lock(&priv->mutex);
5958 ipw_adhoc_check(data);
5959 mutex_unlock(&priv->mutex);
5960 }
5961
5962 #ifdef CONFIG_IPW2200_DEBUG
5963 static void ipw_debug_config(struct ipw_priv *priv)
5964 {
5965 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5966 "[CFG 0x%08X]\n", priv->config);
5967 if (priv->config & CFG_STATIC_CHANNEL)
5968 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5969 else
5970 IPW_DEBUG_INFO("Channel unlocked.\n");
5971 if (priv->config & CFG_STATIC_ESSID)
5972 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5973 escape_essid(priv->essid, priv->essid_len));
5974 else
5975 IPW_DEBUG_INFO("ESSID unlocked.\n");
5976 if (priv->config & CFG_STATIC_BSSID)
5977 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5978 MAC_ARG(priv->bssid));
5979 else
5980 IPW_DEBUG_INFO("BSSID unlocked.\n");
5981 if (priv->capability & CAP_PRIVACY_ON)
5982 IPW_DEBUG_INFO("PRIVACY on\n");
5983 else
5984 IPW_DEBUG_INFO("PRIVACY off\n");
5985 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5986 }
5987 #else
5988 #define ipw_debug_config(x) do {} while (0)
5989 #endif
5990
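/*
 * Translate the user supplied rate mask into the firmware's fixed-rate
 * override format for the band/mode in use and write it to the
 * firmware address read from IPW_MEM_FIXED_OVERRIDE.
 */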
5991 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5992 {
5993 /* TODO: Verify that this works... */
5994 struct ipw_fixed_rate fr = {
5995 .tx_rates = priv->rates_mask
5996 };
5997 u32 reg;
5998 u16 mask = 0;
5999
6000 /* Identify 'current FW band' and match it with the fixed
6001 * Tx rates */
6002
6003 switch (priv->ieee->freq_band) {
6004 case IEEE80211_52GHZ_BAND: /* A only */
6005 /* IEEE_A */
6006 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6007 /* Invalid fixed rate mask */
6008 IPW_DEBUG_WX
6009 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6010 fr.tx_rates = 0;
6011 break;
6012 }
6013
6014 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6015 break;
6016
6017 default: /* 2.4GHz or Mixed */
6018 /* IEEE_B */
6019 if (mode == IEEE_B) {
6020 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6021 /* Invalid fixed rate mask */
6022 IPW_DEBUG_WX
6023 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6024 fr.tx_rates = 0;
6025 }
6026 break;
6027 }
6028
6029 /* IEEE_G */
6030 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6031 IEEE80211_OFDM_RATES_MASK)) {
6032 /* Invalid fixed rate mask */
6033 IPW_DEBUG_WX
6034 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6035 fr.tx_rates = 0;
6036 break;
6037 }
6038
6039 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6040 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6041 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6042 }
6043
6044 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6045 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6046 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6047 }
6048
6049 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6050 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6051 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6052 }
6053
6054 fr.tx_rates |= mask;
6055 break;
6056 }
6057
6058 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6059 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6060 }
6061
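/*
 * Ask the firmware to abort the scan currently in progress.  The
 * STATUS_SCAN_ABORTING flag guards against issuing the abort command
 * more than once.
 */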
6062 static void ipw_abort_scan(struct ipw_priv *priv)
6063 {
6064 int err;
6065
6066 if (priv->status & STATUS_SCAN_ABORTING) {
6067 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6068 return;
6069 }
6070 priv->status |= STATUS_SCAN_ABORTING;
6071
6072 err = ipw_send_scan_abort(priv);
6073 if (err)
6074 IPW_DEBUG_HC("Request to abort scan failed.\n");
6075 }
6076
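/*
 * Fill the channel list of a scan request.  Channels are grouped per
 * band; the first entry of each group encodes the band (IPW_A_MODE or
 * IPW_B_MODE in the upper bits) plus the number of channels that
 * follow.  The channel we are currently associated on is skipped, and
 * passive-only channels are always scanned with a passive full dwell.
 */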
6077 static void ipw_add_scan_channels(struct ipw_priv *priv,
6078 struct ipw_scan_request_ext *scan,
6079 int scan_type)
6080 {
6081 int channel_index = 0;
6082 const struct ieee80211_geo *geo;
6083 int i;
6084
6085 geo = ieee80211_get_geo(priv->ieee);
6086
6087 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6088 int start = channel_index;
6089 for (i = 0; i < geo->a_channels; i++) {
6090 if ((priv->status & STATUS_ASSOCIATED) &&
6091 geo->a[i].channel == priv->channel)
6092 continue;
6093 channel_index++;
6094 scan->channels_list[channel_index] = geo->a[i].channel;
6095 ipw_set_scan_type(scan, channel_index,
6096 geo->a[i].
6097 flags & IEEE80211_CH_PASSIVE_ONLY ?
6098 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6099 scan_type);
6100 }
6101
6102 if (start != channel_index) {
6103 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6104 (channel_index - start);
6105 channel_index++;
6106 }
6107 }
6108
6109 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6110 int start = channel_index;
6111 if (priv->config & CFG_SPEED_SCAN) {
6112 int index;
6113 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6114 /* nop out the list */
6115 [0] = 0
6116 };
6117
6118 u8 channel;
6119 while (channel_index < IPW_SCAN_CHANNELS) {
6120 channel =
6121 priv->speed_scan[priv->speed_scan_pos];
6122 if (channel == 0) {
6123 priv->speed_scan_pos = 0;
6124 channel = priv->speed_scan[0];
6125 }
6126 if ((priv->status & STATUS_ASSOCIATED) &&
6127 channel == priv->channel) {
6128 priv->speed_scan_pos++;
6129 continue;
6130 }
6131
6132 /* If this channel has already been
6133 * added in scan, break from loop
6134 * and this will be the first channel
6135 * in the next scan.
6136 */
6137 if (channels[channel - 1] != 0)
6138 break;
6139
6140 channels[channel - 1] = 1;
6141 priv->speed_scan_pos++;
6142 channel_index++;
6143 scan->channels_list[channel_index] = channel;
6144 index =
6145 ieee80211_channel_to_index(priv->ieee, channel);
6146 ipw_set_scan_type(scan, channel_index,
6147 geo->bg[index].
6148 flags &
6149 IEEE80211_CH_PASSIVE_ONLY ?
6150 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6151 : scan_type);
6152 }
6153 } else {
6154 for (i = 0; i < geo->bg_channels; i++) {
6155 if ((priv->status & STATUS_ASSOCIATED) &&
6156 geo->bg[i].channel == priv->channel)
6157 continue;
6158 channel_index++;
6159 scan->channels_list[channel_index] =
6160 geo->bg[i].channel;
6161 ipw_set_scan_type(scan, channel_index,
6162 geo->bg[i].
6163 flags &
6164 IEEE80211_CH_PASSIVE_ONLY ?
6165 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6166 : scan_type);
6167 }
6168 }
6169
6170 if (start != channel_index) {
6171 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6172 (channel_index - start);
6173 }
6174 }
6175 }
6176
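/*
 * Build and send a scan request to the firmware.  The request is
 * deferred (STATUS_SCAN_PENDING) if a scan is already running, an
 * abort is pending (unless the scan is forced), or the radio is
 * RF-killed.  In monitor mode a single channel is scanned passively;
 * otherwise the channel list comes from ipw_add_scan_channels() and a
 * directed scan is used while roaming, or on every other scan when a
 * static ESSID is configured and we are not yet associated.
 */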
6177 static int ipw_request_scan(struct ipw_priv *priv)
6178 {
6179 struct ipw_scan_request_ext scan;
6180 int err = 0, scan_type;
6181
6182 if (!(priv->status & STATUS_INIT) ||
6183 (priv->status & STATUS_EXIT_PENDING))
6184 return 0;
6185
6186 mutex_lock(&priv->mutex);
6187
6188 if (priv->status & STATUS_SCANNING) {
6189 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6190 priv->status |= STATUS_SCAN_PENDING;
6191 goto done;
6192 }
6193
6194 if (!(priv->status & STATUS_SCAN_FORCED) &&
6195 priv->status & STATUS_SCAN_ABORTING) {
6196 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6197 priv->status |= STATUS_SCAN_PENDING;
6198 goto done;
6199 }
6200
6201 if (priv->status & STATUS_RF_KILL_MASK) {
6202 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6203 priv->status |= STATUS_SCAN_PENDING;
6204 goto done;
6205 }
6206
6207 memset(&scan, 0, sizeof(scan));
6208
6209 if (priv->config & CFG_SPEED_SCAN)
6210 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6211 cpu_to_le16(30);
6212 else
6213 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6214 cpu_to_le16(20);
6215
6216 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6217 cpu_to_le16(20);
6218 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6219
6220 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6221
6222 #ifdef CONFIG_IPW2200_MONITOR
6223 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6224 u8 channel;
6225 u8 band = 0;
6226
6227 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6228 case IEEE80211_52GHZ_BAND:
6229 band = (u8) (IPW_A_MODE << 6) | 1;
6230 channel = priv->channel;
6231 break;
6232
6233 case IEEE80211_24GHZ_BAND:
6234 band = (u8) (IPW_B_MODE << 6) | 1;
6235 channel = priv->channel;
6236 break;
6237
6238 default:
6239 band = (u8) (IPW_B_MODE << 6) | 1;
6240 channel = 9;
6241 break;
6242 }
6243
6244 scan.channels_list[0] = band;
6245 scan.channels_list[1] = channel;
6246 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6247
6248 /* NOTE: The card will sit on this channel for this time
6249 * period. Scan aborts are timing sensitive and frequently
6250 * result in firmware restarts. As such, it is best to
6251 * set a small dwell_time here and just keep re-issuing
6252 * scans. Otherwise fast channel hopping will not actually
6253 * hop channels.
6254 *
6255 * TODO: Move SPEED SCAN support to all modes and bands */
6256 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6257 cpu_to_le16(2000);
6258 } else {
6259 #endif /* CONFIG_IPW2200_MONITOR */
6260 /* If we are roaming, then make this a directed scan for the
6261 * current network. Otherwise, ensure that every other scan
6262 * is a fast channel hop scan */
6263 if ((priv->status & STATUS_ROAMING)
6264 || (!(priv->status & STATUS_ASSOCIATED)
6265 && (priv->config & CFG_STATIC_ESSID)
6266 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6267 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6268 if (err) {
6269 IPW_DEBUG_HC("Attempt to send SSID command "
6270 "failed.\n");
6271 goto done;
6272 }
6273
6274 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6275 } else
6276 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6277
6278 ipw_add_scan_channels(priv, &scan, scan_type);
6279 #ifdef CONFIG_IPW2200_MONITOR
6280 }
6281 #endif
6282
6283 err = ipw_send_scan_request_ext(priv, &scan);
6284 if (err) {
6285 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6286 goto done;
6287 }
6288
6289 priv->status |= STATUS_SCANNING;
6290 priv->status &= ~STATUS_SCAN_PENDING;
6291 queue_delayed_work(priv->workqueue, &priv->scan_check,
6292 IPW_SCAN_CHECK_WATCHDOG);
6293 done:
6294 mutex_unlock(&priv->mutex);
6295 return err;
6296 }
6297
6298 static void ipw_bg_abort_scan(void *data)
6299 {
6300 struct ipw_priv *priv = data;
6301 mutex_lock(&priv->mutex);
6302 ipw_abort_scan(data);
6303 mutex_unlock(&priv->mutex);
6304 }
6305
6306 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6307 {
6308 /* This is called when wpa_supplicant loads and closes the driver
6309 * interface. */
6310 priv->ieee->wpa_enabled = value;
6311 return 0;
6312 }
6313
6314 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6315 {
6316 struct ieee80211_device *ieee = priv->ieee;
6317 struct ieee80211_security sec = {
6318 .flags = SEC_AUTH_MODE,
6319 };
6320 int ret = 0;
6321
6322 if (value & IW_AUTH_ALG_SHARED_KEY) {
6323 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6324 ieee->open_wep = 0;
6325 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6326 sec.auth_mode = WLAN_AUTH_OPEN;
6327 ieee->open_wep = 1;
6328 } else if (value & IW_AUTH_ALG_LEAP) {
6329 sec.auth_mode = WLAN_AUTH_LEAP;
6330 ieee->open_wep = 1;
6331 } else
6332 return -EINVAL;
6333
6334 if (ieee->set_security)
6335 ieee->set_security(ieee->dev, &sec);
6336 else
6337 ret = -EOPNOTSUPP;
6338
6339 return ret;
6340 }
6341
6342 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6343 int wpa_ie_len)
6344 {
6345 /* make sure WPA is enabled */
6346 ipw_wpa_enable(priv, 1);
6347 }
6348
6349 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6350 char *capabilities, int length)
6351 {
6352 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6353
6354 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6355 capabilities);
6356 }
6357
6358 /*
6359 * WE-18 support
6360 */
6361
6362 /* SIOCSIWGENIE */
6363 static int ipw_wx_set_genie(struct net_device *dev,
6364 struct iw_request_info *info,
6365 union iwreq_data *wrqu, char *extra)
6366 {
6367 struct ipw_priv *priv = ieee80211_priv(dev);
6368 struct ieee80211_device *ieee = priv->ieee;
6369 u8 *buf;
6370 int err = 0;
6371
6372 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6373 (wrqu->data.length && extra == NULL))
6374 return -EINVAL;
6375
6376 //mutex_lock(&priv->mutex);
6377
6378 //if (!ieee->wpa_enabled) {
6379 // err = -EOPNOTSUPP;
6380 // goto out;
6381 //}
6382
6383 if (wrqu->data.length) {
6384 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6385 if (buf == NULL) {
6386 err = -ENOMEM;
6387 goto out;
6388 }
6389
6390 memcpy(buf, extra, wrqu->data.length);
6391 kfree(ieee->wpa_ie);
6392 ieee->wpa_ie = buf;
6393 ieee->wpa_ie_len = wrqu->data.length;
6394 } else {
6395 kfree(ieee->wpa_ie);
6396 ieee->wpa_ie = NULL;
6397 ieee->wpa_ie_len = 0;
6398 }
6399
6400 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6401 out:
6402 //mutex_unlock(&priv->mutex);
6403 return err;
6404 }
6405
6406 /* SIOCGIWGENIE */
6407 static int ipw_wx_get_genie(struct net_device *dev,
6408 struct iw_request_info *info,
6409 union iwreq_data *wrqu, char *extra)
6410 {
6411 struct ipw_priv *priv = ieee80211_priv(dev);
6412 struct ieee80211_device *ieee = priv->ieee;
6413 int err = 0;
6414
6415 //mutex_lock(&priv->mutex);
6416
6417 //if (!ieee->wpa_enabled) {
6418 // err = -EOPNOTSUPP;
6419 // goto out;
6420 //}
6421
6422 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6423 wrqu->data.length = 0;
6424 goto out;
6425 }
6426
6427 if (wrqu->data.length < ieee->wpa_ie_len) {
6428 err = -E2BIG;
6429 goto out;
6430 }
6431
6432 wrqu->data.length = ieee->wpa_ie_len;
6433 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6434
6435 out:
6436 //mutex_unlock(&priv->mutex);
6437 return err;
6438 }
6439
6440 static int wext_cipher2level(int cipher)
6441 {
6442 switch (cipher) {
6443 case IW_AUTH_CIPHER_NONE:
6444 return SEC_LEVEL_0;
6445 case IW_AUTH_CIPHER_WEP40:
6446 case IW_AUTH_CIPHER_WEP104:
6447 return SEC_LEVEL_1;
6448 case IW_AUTH_CIPHER_TKIP:
6449 return SEC_LEVEL_2;
6450 case IW_AUTH_CIPHER_CCMP:
6451 return SEC_LEVEL_3;
6452 default:
6453 return -1;
6454 }
6455 }
6456
6457 /* SIOCSIWAUTH */
6458 static int ipw_wx_set_auth(struct net_device *dev,
6459 struct iw_request_info *info,
6460 union iwreq_data *wrqu, char *extra)
6461 {
6462 struct ipw_priv *priv = ieee80211_priv(dev);
6463 struct ieee80211_device *ieee = priv->ieee;
6464 struct iw_param *param = &wrqu->param;
6465 struct ieee80211_crypt_data *crypt;
6466 unsigned long flags;
6467 int ret = 0;
6468
6469 switch (param->flags & IW_AUTH_INDEX) {
6470 case IW_AUTH_WPA_VERSION:
6471 break;
6472 case IW_AUTH_CIPHER_PAIRWISE:
6473 ipw_set_hw_decrypt_unicast(priv,
6474 wext_cipher2level(param->value));
6475 break;
6476 case IW_AUTH_CIPHER_GROUP:
6477 ipw_set_hw_decrypt_multicast(priv,
6478 wext_cipher2level(param->value));
6479 break;
6480 case IW_AUTH_KEY_MGMT:
6481 /*
6482 * ipw2200 does not use these parameters
6483 */
6484 break;
6485
6486 case IW_AUTH_TKIP_COUNTERMEASURES:
6487 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6488 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6489 break;
6490
6491 flags = crypt->ops->get_flags(crypt->priv);
6492
6493 if (param->value)
6494 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6495 else
6496 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6497
6498 crypt->ops->set_flags(flags, crypt->priv);
6499
6500 break;
6501
6502 case IW_AUTH_DROP_UNENCRYPTED:{
6503 /* HACK:
6504 *
6505 * wpa_supplicant calls set_wpa_enabled when the driver
6506 * is loaded and unloaded, regardless of whether WPA is being
6507 * used. No other calls are made that could be used to
6508 * determine whether encryption will be used before an
6509 * association is expected. If encryption is not being
6510 * used, drop_unencrypted is set to false, else true -- we
6511 * can use this to determine if the CAP_PRIVACY_ON bit should
6512 * be set.
6513 */
6514 struct ieee80211_security sec = {
6515 .flags = SEC_ENABLED,
6516 .enabled = param->value,
6517 };
6518 priv->ieee->drop_unencrypted = param->value;
6519 /* We only change SEC_LEVEL for open mode. Others
6520 * are set by ipw_wpa_set_encryption.
6521 */
6522 if (!param->value) {
6523 sec.flags |= SEC_LEVEL;
6524 sec.level = SEC_LEVEL_0;
6525 } else {
6526 sec.flags |= SEC_LEVEL;
6527 sec.level = SEC_LEVEL_1;
6528 }
6529 if (priv->ieee->set_security)
6530 priv->ieee->set_security(priv->ieee->dev, &sec);
6531 break;
6532 }
6533
6534 case IW_AUTH_80211_AUTH_ALG:
6535 ret = ipw_wpa_set_auth_algs(priv, param->value);
6536 break;
6537
6538 case IW_AUTH_WPA_ENABLED:
6539 ret = ipw_wpa_enable(priv, param->value);
6540 ipw_disassociate(priv);
6541 break;
6542
6543 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6544 ieee->ieee802_1x = param->value;
6545 break;
6546
6547 //case IW_AUTH_ROAMING_CONTROL:
6548 case IW_AUTH_PRIVACY_INVOKED:
6549 ieee->privacy_invoked = param->value;
6550 break;
6551
6552 default:
6553 return -EOPNOTSUPP;
6554 }
6555 return ret;
6556 }
6557
6558 /* SIOCGIWAUTH */
6559 static int ipw_wx_get_auth(struct net_device *dev,
6560 struct iw_request_info *info,
6561 union iwreq_data *wrqu, char *extra)
6562 {
6563 struct ipw_priv *priv = ieee80211_priv(dev);
6564 struct ieee80211_device *ieee = priv->ieee;
6565 struct ieee80211_crypt_data *crypt;
6566 struct iw_param *param = &wrqu->param;
6567 int ret = 0;
6568
6569 switch (param->flags & IW_AUTH_INDEX) {
6570 case IW_AUTH_WPA_VERSION:
6571 case IW_AUTH_CIPHER_PAIRWISE:
6572 case IW_AUTH_CIPHER_GROUP:
6573 case IW_AUTH_KEY_MGMT:
6574 /*
6575 * wpa_supplicant will control these internally
6576 */
6577 ret = -EOPNOTSUPP;
6578 break;
6579
6580 case IW_AUTH_TKIP_COUNTERMEASURES:
6581 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6582 if (!crypt || !crypt->ops->get_flags)
6583 break;
6584
6585 param->value = (crypt->ops->get_flags(crypt->priv) &
6586 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6587
6588 break;
6589
6590 case IW_AUTH_DROP_UNENCRYPTED:
6591 param->value = ieee->drop_unencrypted;
6592 break;
6593
6594 case IW_AUTH_80211_AUTH_ALG:
6595 param->value = ieee->sec.auth_mode;
6596 break;
6597
6598 case IW_AUTH_WPA_ENABLED:
6599 param->value = ieee->wpa_enabled;
6600 break;
6601
6602 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6603 param->value = ieee->ieee802_1x;
6604 break;
6605
6606 case IW_AUTH_ROAMING_CONTROL:
6607 case IW_AUTH_PRIVACY_INVOKED:
6608 param->value = ieee->privacy_invoked;
6609 break;
6610
6611 default:
6612 return -EOPNOTSUPP;
6613 }
6614 return 0;
6615 }
6616
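/*
 * With hardware crypto enabled, TKIP still requires host involvement:
 * the firmware cannot build the Michael MIC, so MSDU encryption and
 * decryption stay on the host while other algorithms run fully in
 * hardware.  The flags below are adjusted accordingly before the key
 * is handed to the generic ieee80211 handler.
 */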
6617 /* SIOCSIWENCODEEXT */
6618 static int ipw_wx_set_encodeext(struct net_device *dev,
6619 struct iw_request_info *info,
6620 union iwreq_data *wrqu, char *extra)
6621 {
6622 struct ipw_priv *priv = ieee80211_priv(dev);
6623 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6624
6625 if (hwcrypto) {
6626 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6627 /* IPW HW can't build TKIP MIC,
6628 host decryption still needed */
6629 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6630 priv->ieee->host_mc_decrypt = 1;
6631 else {
6632 priv->ieee->host_encrypt = 0;
6633 priv->ieee->host_encrypt_msdu = 1;
6634 priv->ieee->host_decrypt = 1;
6635 }
6636 } else {
6637 priv->ieee->host_encrypt = 0;
6638 priv->ieee->host_encrypt_msdu = 0;
6639 priv->ieee->host_decrypt = 0;
6640 priv->ieee->host_mc_decrypt = 0;
6641 }
6642 }
6643
6644 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6645 }
6646
6647 /* SIOCGIWENCODEEXT */
6648 static int ipw_wx_get_encodeext(struct net_device *dev,
6649 struct iw_request_info *info,
6650 union iwreq_data *wrqu, char *extra)
6651 {
6652 struct ipw_priv *priv = ieee80211_priv(dev);
6653 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6654 }
6655
6656 /* SIOCSIWMLME */
6657 static int ipw_wx_set_mlme(struct net_device *dev,
6658 struct iw_request_info *info,
6659 union iwreq_data *wrqu, char *extra)
6660 {
6661 struct ipw_priv *priv = ieee80211_priv(dev);
6662 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6663 u16 reason;
6664
6665 reason = cpu_to_le16(mlme->reason_code);
6666
6667 switch (mlme->cmd) {
6668 case IW_MLME_DEAUTH:
6669 // silently ignore
6670 break;
6671
6672 case IW_MLME_DISASSOC:
6673 ipw_disassociate(priv);
6674 break;
6675
6676 default:
6677 return -EOPNOTSUPP;
6678 }
6679 return 0;
6680 }
6681
6682 #ifdef CONFIG_IPW2200_QOS
6683
6684 /* QoS */
6685 /*
6686 * get the modulation type of the current network or
6687 * the card's current mode
6688 */
6689 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6690 {
6691 u8 mode = 0;
6692
6693 if (priv->status & STATUS_ASSOCIATED) {
6694 unsigned long flags;
6695
6696 spin_lock_irqsave(&priv->ieee->lock, flags);
6697 mode = priv->assoc_network->mode;
6698 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6699 } else {
6700 mode = priv->ieee->mode;
6701 }
6702 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6703 return mode;
6704 }
6705
6706 /*
6707 * Handle management frame beacon and probe response
6708 */
6709 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6710 int active_network,
6711 struct ieee80211_network *network)
6712 {
6713 u32 size = sizeof(struct ieee80211_qos_parameters);
6714
6715 if (network->capability & WLAN_CAPABILITY_IBSS)
6716 network->qos_data.active = network->qos_data.supported;
6717
6718 if (network->flags & NETWORK_HAS_QOS_MASK) {
6719 if (active_network &&
6720 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6721 network->qos_data.active = network->qos_data.supported;
6722
6723 if ((network->qos_data.active == 1) && (active_network == 1) &&
6724 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6725 (network->qos_data.old_param_count !=
6726 network->qos_data.param_count)) {
6727 network->qos_data.old_param_count =
6728 network->qos_data.param_count;
6729 schedule_work(&priv->qos_activate);
6730 IPW_DEBUG_QOS("QoS parameters changed, calling "
6731 "qos_activate\n");
6732 }
6733 } else {
6734 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6735 memcpy(&network->qos_data.parameters,
6736 &def_parameters_CCK, size);
6737 else
6738 memcpy(&network->qos_data.parameters,
6739 &def_parameters_OFDM, size);
6740
6741 if ((network->qos_data.active == 1) && (active_network == 1)) {
6742 IPW_DEBUG_QOS("QoS was disabled, calling qos_activate\n");
6743 schedule_work(&priv->qos_activate);
6744 }
6745
6746 network->qos_data.active = 0;
6747 network->qos_data.supported = 0;
6748 }
6749 if ((priv->status & STATUS_ASSOCIATED) &&
6750 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6751 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6752 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6753 !(network->flags & NETWORK_EMPTY_ESSID))
6754 if ((network->ssid_len ==
6755 priv->assoc_network->ssid_len) &&
6756 !memcmp(network->ssid,
6757 priv->assoc_network->ssid,
6758 network->ssid_len)) {
6759 queue_work(priv->workqueue,
6760 &priv->merge_networks);
6761 }
6762 }
6763
6764 return 0;
6765 }
6766
6767 /*
6768 * This function sets up the firmware to support QoS. It sends
6769 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6770 */
6771 static int ipw_qos_activate(struct ipw_priv *priv,
6772 struct ieee80211_qos_data *qos_network_data)
6773 {
6774 int err;
6775 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6776 struct ieee80211_qos_parameters *active_one = NULL;
6777 u32 size = sizeof(struct ieee80211_qos_parameters);
6778 u32 burst_duration;
6779 int i;
6780 u8 type;
6781
6782 type = ipw_qos_current_mode(priv);
6783
6784 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6785 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6786 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6787 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6788
6789 if (qos_network_data == NULL) {
6790 if (type == IEEE_B) {
6791 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6792 active_one = &def_parameters_CCK;
6793 } else
6794 active_one = &def_parameters_OFDM;
6795
6796 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6797 burst_duration = ipw_qos_get_burst_duration(priv);
6798 for (i = 0; i < QOS_QUEUE_NUM; i++)
6799 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6800 (u16) burst_duration;
6801 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6802 if (type == IEEE_B) {
6803 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6804 type);
6805 if (priv->qos_data.qos_enable == 0)
6806 active_one = &def_parameters_CCK;
6807 else
6808 active_one = priv->qos_data.def_qos_parm_CCK;
6809 } else {
6810 if (priv->qos_data.qos_enable == 0)
6811 active_one = &def_parameters_OFDM;
6812 else
6813 active_one = priv->qos_data.def_qos_parm_OFDM;
6814 }
6815 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6816 } else {
6817 unsigned long flags;
6818 int active;
6819
6820 spin_lock_irqsave(&priv->ieee->lock, flags);
6821 active_one = &(qos_network_data->parameters);
6822 qos_network_data->old_param_count =
6823 qos_network_data->param_count;
6824 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6825 active = qos_network_data->supported;
6826 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6827
6828 if (active == 0) {
6829 burst_duration = ipw_qos_get_burst_duration(priv);
6830 for (i = 0; i < QOS_QUEUE_NUM; i++)
6831 qos_parameters[QOS_PARAM_SET_ACTIVE].
6832 tx_op_limit[i] = (u16) burst_duration;
6833 }
6834 }
6835
6836 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6837 err = ipw_send_qos_params_command(priv,
6838 (struct ieee80211_qos_parameters *)
6839 &(qos_parameters[0]));
6840 if (err)
6841 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6842
6843 return err;
6844 }
6845
6846 /*
6847 * send IPW_CMD_WME_INFO to the firmware
6848 */
6849 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6850 {
6851 int ret = 0;
6852 struct ieee80211_qos_information_element qos_info;
6853
6854 if (priv == NULL)
6855 return -1;
6856
6857 qos_info.elementID = QOS_ELEMENT_ID;
6858 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6859
6860 qos_info.version = QOS_VERSION_1;
6861 qos_info.ac_info = 0;
6862
6863 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6864 qos_info.qui_type = QOS_OUI_TYPE;
6865 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6866
6867 ret = ipw_send_qos_info_command(priv, &qos_info);
6868 if (ret != 0) {
6869 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6870 }
6871 return ret;
6872 }
6873
6874 /*
6875 * Set the QoS parameters for the association request
6876 */
6877 static int ipw_qos_association(struct ipw_priv *priv,
6878 struct ieee80211_network *network)
6879 {
6880 int err = 0;
6881 struct ieee80211_qos_data *qos_data = NULL;
6882 struct ieee80211_qos_data ibss_data = {
6883 .supported = 1,
6884 .active = 1,
6885 };
6886
6887 switch (priv->ieee->iw_mode) {
6888 case IW_MODE_ADHOC:
6889 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6890
6891 qos_data = &ibss_data;
6892 break;
6893
6894 case IW_MODE_INFRA:
6895 qos_data = &network->qos_data;
6896 break;
6897
6898 default:
6899 BUG();
6900 break;
6901 }
6902
6903 err = ipw_qos_activate(priv, qos_data);
6904 if (err) {
6905 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6906 return err;
6907 }
6908
6909 if (priv->qos_data.qos_enable && qos_data->supported) {
6910 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6911 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6912 return ipw_qos_set_info_element(priv);
6913 }
6914
6915 return 0;
6916 }
6917
6918 /*
6919 * Handle beacon responses: if the network advertises QoS settings
6920 * that differ from those of the current association, adjust the
6921 * QoS settings accordingly.
6922 */
6923 static int ipw_qos_association_resp(struct ipw_priv *priv,
6924 struct ieee80211_network *network)
6925 {
6926 int ret = 0;
6927 unsigned long flags;
6928 u32 size = sizeof(struct ieee80211_qos_parameters);
6929 int set_qos_param = 0;
6930
6931 if ((priv == NULL) || (network == NULL) ||
6932 (priv->assoc_network == NULL))
6933 return ret;
6934
6935 if (!(priv->status & STATUS_ASSOCIATED))
6936 return ret;
6937
6938 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6939 return ret;
6940
6941 spin_lock_irqsave(&priv->ieee->lock, flags);
6942 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6943 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6944 sizeof(struct ieee80211_qos_data));
6945 priv->assoc_network->qos_data.active = 1;
6946 if ((network->qos_data.old_param_count !=
6947 network->qos_data.param_count)) {
6948 set_qos_param = 1;
6949 network->qos_data.old_param_count =
6950 network->qos_data.param_count;
6951 }
6952
6953 } else {
6954 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6955 memcpy(&priv->assoc_network->qos_data.parameters,
6956 &def_parameters_CCK, size);
6957 else
6958 memcpy(&priv->assoc_network->qos_data.parameters,
6959 &def_parameters_OFDM, size);
6960 priv->assoc_network->qos_data.active = 0;
6961 priv->assoc_network->qos_data.supported = 0;
6962 set_qos_param = 1;
6963 }
6964
6965 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6966
6967 if (set_qos_param == 1)
6968 schedule_work(&priv->qos_activate);
6969
6970 return ret;
6971 }
6972
6973 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6974 {
6975 u32 ret = 0;
6976
6977 if ((priv == NULL))
6978 return 0;
6979
6980 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6981 ret = priv->qos_data.burst_duration_CCK;
6982 else
6983 ret = priv->qos_data.burst_duration_OFDM;
6984
6985 return ret;
6986 }
6987
6988 /*
6989 * Initialize the global QoS settings
6990 */
6991 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6992 int burst_enable, u32 burst_duration_CCK,
6993 u32 burst_duration_OFDM)
6994 {
6995 priv->qos_data.qos_enable = enable;
6996
6997 if (priv->qos_data.qos_enable) {
6998 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6999 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7000 IPW_DEBUG_QOS("QoS is enabled\n");
7001 } else {
7002 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7003 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7004 IPW_DEBUG_QOS("QoS is not enabled\n");
7005 }
7006
7007 priv->qos_data.burst_enable = burst_enable;
7008
7009 if (burst_enable) {
7010 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7011 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7012 } else {
7013 priv->qos_data.burst_duration_CCK = 0;
7014 priv->qos_data.burst_duration_OFDM = 0;
7015 }
7016 }
7017
7018 /*
7019 * map the packet priority to the right TX Queue
7020 */
7021 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7022 {
7023 if (priority > 7 || !priv->qos_data.qos_enable)
7024 priority = 0;
7025
7026 return from_priority_to_tx_queue[priority] - 1;
7027 }
7028
7029 static int ipw_is_qos_active(struct net_device *dev,
7030 struct sk_buff *skb)
7031 {
7032 struct ipw_priv *priv = ieee80211_priv(dev);
7033 struct ieee80211_qos_data *qos_data = NULL;
7034 int active, supported;
7035 u8 *daddr = skb->data + ETH_ALEN;
7036 int unicast = !is_multicast_ether_addr(daddr);
7037
7038 if (!(priv->status & STATUS_ASSOCIATED))
7039 return 0;
7040
7041 qos_data = &priv->assoc_network->qos_data;
7042
7043 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7044 if (unicast == 0)
7045 qos_data->active = 0;
7046 else
7047 qos_data->active = qos_data->supported;
7048 }
7049 active = qos_data->active;
7050 supported = qos_data->supported;
7051 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7052 "unicast %d\n",
7053 priv->qos_data.qos_enable, active, supported, unicast);
7054 if (active && priv->qos_data.qos_enable)
7055 return 1;
7056
7057 return 0;
7058 }
7059
7060 /*
7061 * add QoS parameter to the TX command
7062 */
7063 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7064 u16 priority,
7065 struct tfd_data *tfd)
7066 {
7067 int tx_queue_id = 0;
7068
7069
7070 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7071 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7072
7073 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7074 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7075 tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
7076 }
7077 return 0;
7078 }
7079
7080 /*
7081 * background support to run QoS activate functionality
7082 */
7083 static void ipw_bg_qos_activate(void *data)
7084 {
7085 struct ipw_priv *priv = data;
7086
7087 if (priv == NULL)
7088 return;
7089
7090 mutex_lock(&priv->mutex);
7091
7092 if (priv->status & STATUS_ASSOCIATED)
7093 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7094
7095 mutex_unlock(&priv->mutex);
7096 }
7097
7098 static int ipw_handle_probe_response(struct net_device *dev,
7099 struct ieee80211_probe_response *resp,
7100 struct ieee80211_network *network)
7101 {
7102 struct ipw_priv *priv = ieee80211_priv(dev);
7103 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7104 (network == priv->assoc_network));
7105
7106 ipw_qos_handle_probe_response(priv, active_network, network);
7107
7108 return 0;
7109 }
7110
7111 static int ipw_handle_beacon(struct net_device *dev,
7112 struct ieee80211_beacon *resp,
7113 struct ieee80211_network *network)
7114 {
7115 struct ipw_priv *priv = ieee80211_priv(dev);
7116 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7117 (network == priv->assoc_network));
7118
7119 ipw_qos_handle_probe_response(priv, active_network, network);
7120
7121 return 0;
7122 }
7123
7124 static int ipw_handle_assoc_response(struct net_device *dev,
7125 struct ieee80211_assoc_response *resp,
7126 struct ieee80211_network *network)
7127 {
7128 struct ipw_priv *priv = ieee80211_priv(dev);
7129 ipw_qos_association_resp(priv, network);
7130 return 0;
7131 }
7132
7133 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7134 *qos_param)
7135 {
7136 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7137 sizeof(*qos_param) * 3, qos_param);
7138 }
7139
7140 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7141 *qos_param)
7142 {
7143 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7144 qos_param);
7145 }
7146
7147 #endif /* CONFIG_IPW2200_QOS */
7148
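/*
 * Build the association request for the selected network and the
 * negotiated rates, send the SSID, supported rates, system config and
 * sensitivity to the firmware, and finally issue the associate
 * (or reassociate / IBSS-start) command.
 */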
7149 static int ipw_associate_network(struct ipw_priv *priv,
7150 struct ieee80211_network *network,
7151 struct ipw_supported_rates *rates, int roaming)
7152 {
7153 int err;
7154
7155 if (priv->config & CFG_FIXED_RATE)
7156 ipw_set_fixed_rate(priv, network->mode);
7157
7158 if (!(priv->config & CFG_STATIC_ESSID)) {
7159 priv->essid_len = min(network->ssid_len,
7160 (u8) IW_ESSID_MAX_SIZE);
7161 memcpy(priv->essid, network->ssid, priv->essid_len);
7162 }
7163
7164 network->last_associate = jiffies;
7165
7166 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7167 priv->assoc_request.channel = network->channel;
7168 priv->assoc_request.auth_key = 0;
7169
7170 if ((priv->capability & CAP_PRIVACY_ON) &&
7171 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7172 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7173 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7174
7175 if (priv->ieee->sec.level == SEC_LEVEL_1)
7176 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7177
7178 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7179 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7180 priv->assoc_request.auth_type = AUTH_LEAP;
7181 else
7182 priv->assoc_request.auth_type = AUTH_OPEN;
7183
7184 if (priv->ieee->wpa_ie_len) {
7185 priv->assoc_request.policy_support = 0x02; /* RSN active */
7186 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7187 priv->ieee->wpa_ie_len);
7188 }
7189
7190 /*
7191 * It is valid for our ieee device to support multiple modes, but
7192 * when it comes to associating to a given network we have to choose
7193 * just one mode.
7194 */
7195 if (network->mode & priv->ieee->mode & IEEE_A)
7196 priv->assoc_request.ieee_mode = IPW_A_MODE;
7197 else if (network->mode & priv->ieee->mode & IEEE_G)
7198 priv->assoc_request.ieee_mode = IPW_G_MODE;
7199 else if (network->mode & priv->ieee->mode & IEEE_B)
7200 priv->assoc_request.ieee_mode = IPW_B_MODE;
7201
7202 priv->assoc_request.capability = network->capability;
7203 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7204 && !(priv->config & CFG_PREAMBLE_LONG)) {
7205 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7206 } else {
7207 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7208
7209 /* Clear the short preamble if we won't be supporting it */
7210 priv->assoc_request.capability &=
7211 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7212 }
7213
7214 /* Clear capability bits that aren't used in Ad Hoc */
7215 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7216 priv->assoc_request.capability &=
7217 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7218
7219 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7220 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7221 roaming ? "Rea" : "A",
7222 escape_essid(priv->essid, priv->essid_len),
7223 network->channel,
7224 ipw_modes[priv->assoc_request.ieee_mode],
7225 rates->num_rates,
7226 (priv->assoc_request.preamble_length ==
7227 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7228 network->capability &
7229 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7230 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7231 priv->capability & CAP_PRIVACY_ON ?
7232 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7233 "(open)") : "",
7234 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7235 priv->capability & CAP_PRIVACY_ON ?
7236 '1' + priv->ieee->sec.active_key : '.',
7237 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7238
7239 priv->assoc_request.beacon_interval = network->beacon_interval;
7240 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7241 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7242 priv->assoc_request.assoc_type = HC_IBSS_START;
7243 priv->assoc_request.assoc_tsf_msw = 0;
7244 priv->assoc_request.assoc_tsf_lsw = 0;
7245 } else {
7246 if (unlikely(roaming))
7247 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7248 else
7249 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7250 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7251 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7252 }
7253
7254 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7255
7256 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7257 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7258 priv->assoc_request.atim_window = network->atim_window;
7259 } else {
7260 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7261 priv->assoc_request.atim_window = 0;
7262 }
7263
7264 priv->assoc_request.listen_interval = network->listen_interval;
7265
7266 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7267 if (err) {
7268 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7269 return err;
7270 }
7271
7272 rates->ieee_mode = priv->assoc_request.ieee_mode;
7273 rates->purpose = IPW_RATE_CONNECT;
7274 ipw_send_supported_rates(priv, rates);
7275
7276 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7277 priv->sys_config.dot11g_auto_detection = 1;
7278 else
7279 priv->sys_config.dot11g_auto_detection = 0;
7280
7281 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7282 priv->sys_config.answer_broadcast_ssid_probe = 1;
7283 else
7284 priv->sys_config.answer_broadcast_ssid_probe = 0;
7285
7286 err = ipw_send_system_config(priv);
7287 if (err) {
7288 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7289 return err;
7290 }
7291
7292 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7293 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7294 if (err) {
7295 IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
7296 return err;
7297 }
7298
7299 /*
7300 * If preemption is enabled, it is possible for the association
7301 * to complete before we return from ipw_send_associate. Therefore
7302 * we have to be sure to update our private data first.
7303 */
7304 priv->channel = network->channel;
7305 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7306 priv->status |= STATUS_ASSOCIATING;
7307 priv->status &= ~STATUS_SECURITY_UPDATED;
7308
7309 priv->assoc_network = network;
7310
7311 #ifdef CONFIG_IPW2200_QOS
7312 ipw_qos_association(priv, network);
7313 #endif
7314
7315 err = ipw_send_associate(priv, &priv->assoc_request);
7316 if (err) {
7317 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7318 return err;
7319 }
7320
7321 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7322 escape_essid(priv->essid, priv->essid_len),
7323 MAC_ARG(priv->bssid));
7324
7325 return 0;
7326 }
7327
7328 static void ipw_roam(void *data)
7329 {
7330 struct ipw_priv *priv = data;
7331 struct ieee80211_network *network = NULL;
7332 struct ipw_network_match match = {
7333 .network = priv->assoc_network
7334 };
7335
7336 /* The roaming process is as follows:
7337 *
7338 * 1. Missed beacon threshold triggers the roaming process by
7339 * setting the status ROAM bit and requesting a scan.
7340 * 2. When the scan completes, it schedules the ROAM work
7341 * 3. The ROAM work looks at all of the known networks for one that
7342 * is a better network than the currently associated one. If
7343 * none is found, the ROAM process is over (ROAM bit cleared)
7344 * 4. If a better network is found, a disassociation request is
7345 * sent.
7346 * 5. When the disassociation completes, the roam work is again
7347 * scheduled. The second time through, the driver is no longer
7348 * associated, and the newly selected network is sent an
7349 * association request.
7350 * 6. At this point, the roaming process is complete and the ROAM
7351 * status bit is cleared.
7352 */
7353
7354 /* If we are no longer associated, and the roaming bit is no longer
7355 * set, then we are not actively roaming, so just return */
7356 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7357 return;
7358
7359 if (priv->status & STATUS_ASSOCIATED) {
7360 /* First pass through ROAM process -- look for a better
7361 * network */
7362 unsigned long flags;
7363 u8 rssi = priv->assoc_network->stats.rssi;
7364 priv->assoc_network->stats.rssi = -128;
7365 spin_lock_irqsave(&priv->ieee->lock, flags);
7366 list_for_each_entry(network, &priv->ieee->network_list, list) {
7367 if (network != priv->assoc_network)
7368 ipw_best_network(priv, &match, network, 1);
7369 }
7370 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7371 priv->assoc_network->stats.rssi = rssi;
7372
7373 if (match.network == priv->assoc_network) {
7374 IPW_DEBUG_ASSOC("No better APs in this network to "
7375 "roam to.\n");
7376 priv->status &= ~STATUS_ROAMING;
7377 ipw_debug_config(priv);
7378 return;
7379 }
7380
7381 ipw_send_disassociate(priv, 1);
7382 priv->assoc_network = match.network;
7383
7384 return;
7385 }
7386
7387 /* Second pass through ROAM process -- request association */
7388 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7389 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7390 priv->status &= ~STATUS_ROAMING;
7391 }
7392
7393 static void ipw_bg_roam(void *data)
7394 {
7395 struct ipw_priv *priv = data;
7396 mutex_lock(&priv->mutex);
7397 ipw_roam(data);
7398 mutex_unlock(&priv->mutex);
7399 }
7400
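/*
 * Pick the best matching network from the scan results (via
 * ipw_best_network) and try to associate with it.  If nothing matches
 * and ad-hoc creation is enabled with a static ESSID and channel, a
 * new IBSS is created instead; otherwise another scan is scheduled.
 * Returns 1 when an association attempt was started.
 */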
7401 static int ipw_associate(void *data)
7402 {
7403 struct ipw_priv *priv = data;
7404
7405 struct ieee80211_network *network = NULL;
7406 struct ipw_network_match match = {
7407 .network = NULL
7408 };
7409 struct ipw_supported_rates *rates;
7410 struct list_head *element;
7411 unsigned long flags;
7412
7413 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7414 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7415 return 0;
7416 }
7417
7418 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7419 IPW_DEBUG_ASSOC("Not attempting association (already in "
7420 "progress)\n");
7421 return 0;
7422 }
7423
7424 if (priv->status & STATUS_DISASSOCIATING) {
7425 IPW_DEBUG_ASSOC("Not attempting association "
7426 "(disassociating)\n");
7427 queue_work(priv->workqueue, &priv->associate);
7428 return 0;
7429 }
7430
7431 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7432 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7433 "initialized)\n");
7434 return 0;
7435 }
7436
7437 if (!(priv->config & CFG_ASSOCIATE) &&
7438 !(priv->config & (CFG_STATIC_ESSID |
7439 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7440 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7441 return 0;
7442 }
7443
7444 /* Protect our use of the network_list */
7445 spin_lock_irqsave(&priv->ieee->lock, flags);
7446 list_for_each_entry(network, &priv->ieee->network_list, list)
7447 ipw_best_network(priv, &match, network, 0);
7448
7449 network = match.network;
7450 rates = &match.rates;
7451
7452 if (network == NULL &&
7453 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7454 priv->config & CFG_ADHOC_CREATE &&
7455 priv->config & CFG_STATIC_ESSID &&
7456 priv->config & CFG_STATIC_CHANNEL &&
7457 !list_empty(&priv->ieee->network_free_list)) {
7458 element = priv->ieee->network_free_list.next;
7459 network = list_entry(element, struct ieee80211_network, list);
7460 ipw_adhoc_create(priv, network);
7461 rates = &priv->rates;
7462 list_del(element);
7463 list_add_tail(&network->list, &priv->ieee->network_list);
7464 }
7465 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7466
7467 /* If we reached the end of the list, then we don't have any valid
7468 * matching APs */
7469 if (!network) {
7470 ipw_debug_config(priv);
7471
7472 if (!(priv->status & STATUS_SCANNING)) {
7473 if (!(priv->config & CFG_SPEED_SCAN))
7474 queue_delayed_work(priv->workqueue,
7475 &priv->request_scan,
7476 SCAN_INTERVAL);
7477 else
7478 queue_work(priv->workqueue,
7479 &priv->request_scan);
7480 }
7481
7482 return 0;
7483 }
7484
7485 ipw_associate_network(priv, network, rates, 0);
7486
7487 return 1;
7488 }
7489
7490 static void ipw_bg_associate(void *data)
7491 {
7492 struct ipw_priv *priv = data;
7493 mutex_lock(&priv->mutex);
7494 ipw_associate(data);
7495 mutex_unlock(&priv->mutex);
7496 }
7497
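/*
 * Frames decrypted by the firmware still carry the protected bit and
 * the cipher's security header/trailer.  Strip them so the frame looks
 * like an ordinary unencrypted 802.11 frame before it is handed to the
 * ieee80211 stack: CCMP removes an 8 byte header and 8 byte MIC, WEP
 * removes the 4 byte IV and 4 byte ICV.
 */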
7498 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7499 struct sk_buff *skb)
7500 {
7501 struct ieee80211_hdr *hdr;
7502 u16 fc;
7503
7504 hdr = (struct ieee80211_hdr *)skb->data;
7505 fc = le16_to_cpu(hdr->frame_ctl);
7506 if (!(fc & IEEE80211_FCTL_PROTECTED))
7507 return;
7508
7509 fc &= ~IEEE80211_FCTL_PROTECTED;
7510 hdr->frame_ctl = cpu_to_le16(fc);
7511 switch (priv->ieee->sec.level) {
7512 case SEC_LEVEL_3:
7513 /* Remove CCMP HDR */
7514 memmove(skb->data + IEEE80211_3ADDR_LEN,
7515 skb->data + IEEE80211_3ADDR_LEN + 8,
7516 skb->len - IEEE80211_3ADDR_LEN - 8);
7517 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7518 break;
7519 case SEC_LEVEL_2:
7520 break;
7521 case SEC_LEVEL_1:
7522 /* Remove IV */
7523 memmove(skb->data + IEEE80211_3ADDR_LEN,
7524 skb->data + IEEE80211_3ADDR_LEN + 4,
7525 skb->len - IEEE80211_3ADDR_LEN - 4);
7526 skb_trim(skb, skb->len - 8); /* IV + ICV */
7527 break;
7528 case SEC_LEVEL_0:
7529 break;
7530 default:
7531 		printk(KERN_ERR "Unknown security level %d\n",
7532 priv->ieee->sec.level);
7533 break;
7534 }
7535 }
7536
7537 static void ipw_handle_data_packet(struct ipw_priv *priv,
7538 struct ipw_rx_mem_buffer *rxb,
7539 struct ieee80211_rx_stats *stats)
7540 {
7541 struct ieee80211_hdr_4addr *hdr;
7542 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7543
7544 /* We received data from the HW, so stop the watchdog */
7545 priv->net_dev->trans_start = jiffies;
7546
7547 	/* Sanity check the frame length, and only process
7548 	 * data packets if the interface is open */
7549 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7550 skb_tailroom(rxb->skb))) {
7551 priv->ieee->stats.rx_errors++;
7552 priv->wstats.discard.misc++;
7553 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7554 return;
7555 } else if (unlikely(!netif_running(priv->net_dev))) {
7556 priv->ieee->stats.rx_dropped++;
7557 priv->wstats.discard.misc++;
7558 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7559 return;
7560 }
7561
7562 /* Advance skb->data to the start of the actual payload */
7563 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7564
7565 /* Set the size of the skb to the size of the frame */
7566 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7567
7568 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7569
7570 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7571 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7572 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7573 (is_multicast_ether_addr(hdr->addr1) ?
7574 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7575 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7576
7577 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7578 priv->ieee->stats.rx_errors++;
7579 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7580 rxb->skb = NULL;
7581 __ipw_led_activity_on(priv);
7582 }
7583 }
7584
7585 #ifdef CONFIG_IPW2200_RADIOTAP
7586 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7587 struct ipw_rx_mem_buffer *rxb,
7588 struct ieee80211_rx_stats *stats)
7589 {
7590 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7591 struct ipw_rx_frame *frame = &pkt->u.frame;
7592
7593 /* initial pull of some data */
7594 u16 received_channel = frame->received_channel;
7595 u8 antennaAndPhy = frame->antennaAndPhy;
7596 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7597 u16 pktrate = frame->rate;
7598
7599 /* Magic struct that slots into the radiotap header -- no reason
7600 * to build this manually element by element, we can write it much
7601 * more efficiently than we can parse it. ORDER MATTERS HERE */
7602 struct ipw_rt_hdr *ipw_rt;
7603
7604 short len = le16_to_cpu(pkt->u.frame.length);
7605
7606 /* We received data from the HW, so stop the watchdog */
7607 priv->net_dev->trans_start = jiffies;
7608
7609 	/* Sanity check the frame length, and only process
7610 	 * data packets if the interface is open */
7611 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7612 skb_tailroom(rxb->skb))) {
7613 priv->ieee->stats.rx_errors++;
7614 priv->wstats.discard.misc++;
7615 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7616 return;
7617 } else if (unlikely(!netif_running(priv->net_dev))) {
7618 priv->ieee->stats.rx_dropped++;
7619 priv->wstats.discard.misc++;
7620 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7621 return;
7622 }
7623
7624 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7625 * that now */
7626 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7627 /* FIXME: Should alloc bigger skb instead */
7628 priv->ieee->stats.rx_dropped++;
7629 priv->wstats.discard.misc++;
7630 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7631 return;
7632 }
7633
7634 /* copy the frame itself */
7635 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7636 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7637
7638 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7639 * part of our real header, saves a little time.
7640 *
7641 * No longer necessary since we fill in all our data. Purge before merging
7642 * patch officially.
7643 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7644 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7645 */
7646
7647 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7648
7649 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7650 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7651 	ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr);	/* radiotap header plus its fields */
7652
7653 /* Big bitfield of all the fields we provide in radiotap */
7654 ipw_rt->rt_hdr.it_present =
7655 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7656 (1 << IEEE80211_RADIOTAP_TSFT) |
7657 (1 << IEEE80211_RADIOTAP_RATE) |
7658 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7659 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7660 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7661 (1 << IEEE80211_RADIOTAP_ANTENNA));
7662
7663 /* Zero the flags, we'll add to them as we go */
7664 ipw_rt->rt_flags = 0;
7665
7666 /* Convert signal to DBM */
7667 ipw_rt->rt_dbmsignal = antsignal;
7668
7669 /* Convert the channel data and set the flags */
7670 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7671 if (received_channel > 14) { /* 802.11a */
7672 ipw_rt->rt_chbitmask =
7673 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7674 } else if (antennaAndPhy & 32) { /* 802.11b */
7675 ipw_rt->rt_chbitmask =
7676 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7677 } else { /* 802.11g */
7678 ipw_rt->rt_chbitmask =
7679 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7680 }
7681
7682 	/* set the rate in units of 500 kb/s (the radiotap convention) */
7683 switch (pktrate) {
7684 case IPW_TX_RATE_1MB:
7685 ipw_rt->rt_rate = 2;
7686 break;
7687 case IPW_TX_RATE_2MB:
7688 ipw_rt->rt_rate = 4;
7689 break;
7690 case IPW_TX_RATE_5MB:
7691 ipw_rt->rt_rate = 10;
7692 break;
7693 case IPW_TX_RATE_6MB:
7694 ipw_rt->rt_rate = 12;
7695 break;
7696 case IPW_TX_RATE_9MB:
7697 ipw_rt->rt_rate = 18;
7698 break;
7699 case IPW_TX_RATE_11MB:
7700 ipw_rt->rt_rate = 22;
7701 break;
7702 case IPW_TX_RATE_12MB:
7703 ipw_rt->rt_rate = 24;
7704 break;
7705 case IPW_TX_RATE_18MB:
7706 ipw_rt->rt_rate = 36;
7707 break;
7708 case IPW_TX_RATE_24MB:
7709 ipw_rt->rt_rate = 48;
7710 break;
7711 case IPW_TX_RATE_36MB:
7712 ipw_rt->rt_rate = 72;
7713 break;
7714 case IPW_TX_RATE_48MB:
7715 ipw_rt->rt_rate = 96;
7716 break;
7717 case IPW_TX_RATE_54MB:
7718 ipw_rt->rt_rate = 108;
7719 break;
7720 default:
7721 ipw_rt->rt_rate = 0;
7722 break;
7723 }
7724
7725 /* antenna number */
7726 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7727
7728 /* set the preamble flag if we have it */
7729 if ((antennaAndPhy & 64))
7730 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7731
7732 /* Set the size of the skb to the size of the frame */
7733 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7734
7735 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7736
7737 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7738 priv->ieee->stats.rx_errors++;
7739 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7740 rxb->skb = NULL;
7741 /* no LED during capture */
7742 }
7743 }
7744 #endif
7745
7746 #ifdef CONFIG_IPW2200_PROMISCUOUS
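/* Frame-classification helpers for the promiscuous rx filter below; each
 * tests the type/subtype bits of an 802.11 frame_control value. */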
7747 #define ieee80211_is_probe_response(fc) \
7748    (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7749     ((fc) & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)
7750
7751 #define ieee80211_is_management(fc) \
7752    (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7753
7754 #define ieee80211_is_control(fc) \
7755    (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7756
7757 #define ieee80211_is_data(fc) \
7758    (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7759
7760 #define ieee80211_is_assoc_request(fc) \
7761    (((fc) & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7762
7763 #define ieee80211_is_reassoc_request(fc) \
7764    (((fc) & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7765
7766 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7767 struct ipw_rx_mem_buffer *rxb,
7768 struct ieee80211_rx_stats *stats)
7769 {
7770 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7771 struct ipw_rx_frame *frame = &pkt->u.frame;
7772 struct ipw_rt_hdr *ipw_rt;
7773
7774 /* First cache any information we need before we overwrite
7775 * the information provided in the skb from the hardware */
7776 struct ieee80211_hdr *hdr;
7777 u16 channel = frame->received_channel;
7778 u8 phy_flags = frame->antennaAndPhy;
7779 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7780 s8 noise = frame->noise;
7781 u8 rate = frame->rate;
7782 short len = le16_to_cpu(pkt->u.frame.length);
7783 u64 tsf = 0;
7784 struct sk_buff *skb;
7785 int hdr_only = 0;
7786 u16 filter = priv->prom_priv->filter;
7787
7788 /* If the filter is set to not include Rx frames then return */
7789 if (filter & IPW_PROM_NO_RX)
7790 return;
7791
7792 /* We received data from the HW, so stop the watchdog */
7793 priv->prom_net_dev->trans_start = jiffies;
7794
7795 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7796 priv->prom_priv->ieee->stats.rx_errors++;
7797 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7798 return;
7799 }
7800
7801 /* We only process data packets if the interface is open */
7802 if (unlikely(!netif_running(priv->prom_net_dev))) {
7803 priv->prom_priv->ieee->stats.rx_dropped++;
7804 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7805 return;
7806 }
7807
7808 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7809 * that now */
7810 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7811 /* FIXME: Should alloc bigger skb instead */
7812 priv->prom_priv->ieee->stats.rx_dropped++;
7813 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7814 return;
7815 }
7816
7817 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7818 if (ieee80211_is_management(hdr->frame_ctl)) {
7819 if (filter & IPW_PROM_NO_MGMT)
7820 return;
7821 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7822 hdr_only = 1;
7823 } else if (ieee80211_is_control(hdr->frame_ctl)) {
7824 if (filter & IPW_PROM_NO_CTL)
7825 return;
7826 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7827 hdr_only = 1;
7828 } else if (ieee80211_is_data(hdr->frame_ctl)) {
7829 if (filter & IPW_PROM_NO_DATA)
7830 return;
7831 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7832 hdr_only = 1;
7833 }
7834
7835 /* Copy the SKB since this is for the promiscuous side */
7836 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7837 if (skb == NULL) {
7838 		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
7839 return;
7840 }
7841
7842 	/* copy the 802.11 frame so that it lands right after the radiotap header */
7843 ipw_rt = (void *)skb->data;
7844
7845 if (hdr_only)
7846 len = ieee80211_get_hdrlen(hdr->frame_ctl);
7847
7848 memcpy(ipw_rt->payload, hdr, len);
7849
7850 /* Zero the radiotap static buffer ... We only need to zero the bytes
7851 * NOT part of our real header, saves a little time.
7852 *
7853 * No longer necessary since we fill in all our data. Purge before
7854 * merging patch officially.
7855 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7856 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7857 */
7858
7859 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7860 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7861 	ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt);	/* radiotap header plus its fields */
7862
7863 /* Set the size of the skb to the size of the frame */
7864 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7865
7866 /* Big bitfield of all the fields we provide in radiotap */
7867 ipw_rt->rt_hdr.it_present =
7868 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7869 (1 << IEEE80211_RADIOTAP_TSFT) |
7870 (1 << IEEE80211_RADIOTAP_RATE) |
7871 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7872 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7873 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7874 (1 << IEEE80211_RADIOTAP_ANTENNA));
7875
7876 /* Zero the flags, we'll add to them as we go */
7877 ipw_rt->rt_flags = 0;
7878
7879 ipw_rt->rt_tsf = tsf;
7880
7881 /* Convert to DBM */
7882 ipw_rt->rt_dbmsignal = signal;
7883 ipw_rt->rt_dbmnoise = noise;
7884
7885 /* Convert the channel data and set the flags */
7886 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7887 if (channel > 14) { /* 802.11a */
7888 ipw_rt->rt_chbitmask =
7889 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7890 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7891 ipw_rt->rt_chbitmask =
7892 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7893 } else { /* 802.11g */
7894 ipw_rt->rt_chbitmask =
7895 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7896 }
7897
7898 	/* set the rate in units of 500 kb/s (the radiotap convention) */
7899 switch (rate) {
7900 case IPW_TX_RATE_1MB:
7901 ipw_rt->rt_rate = 2;
7902 break;
7903 case IPW_TX_RATE_2MB:
7904 ipw_rt->rt_rate = 4;
7905 break;
7906 case IPW_TX_RATE_5MB:
7907 ipw_rt->rt_rate = 10;
7908 break;
7909 case IPW_TX_RATE_6MB:
7910 ipw_rt->rt_rate = 12;
7911 break;
7912 case IPW_TX_RATE_9MB:
7913 ipw_rt->rt_rate = 18;
7914 break;
7915 case IPW_TX_RATE_11MB:
7916 ipw_rt->rt_rate = 22;
7917 break;
7918 case IPW_TX_RATE_12MB:
7919 ipw_rt->rt_rate = 24;
7920 break;
7921 case IPW_TX_RATE_18MB:
7922 ipw_rt->rt_rate = 36;
7923 break;
7924 case IPW_TX_RATE_24MB:
7925 ipw_rt->rt_rate = 48;
7926 break;
7927 case IPW_TX_RATE_36MB:
7928 ipw_rt->rt_rate = 72;
7929 break;
7930 case IPW_TX_RATE_48MB:
7931 ipw_rt->rt_rate = 96;
7932 break;
7933 case IPW_TX_RATE_54MB:
7934 ipw_rt->rt_rate = 108;
7935 break;
7936 default:
7937 ipw_rt->rt_rate = 0;
7938 break;
7939 }
7940
7941 /* antenna number */
7942 ipw_rt->rt_antenna = (phy_flags & 3);
7943
7944 /* set the preamble flag if we have it */
7945 if (phy_flags & (1 << 6))
7946 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7947
7948 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
7949
7950 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
7951 priv->prom_priv->ieee->stats.rx_errors++;
7952 dev_kfree_skb_any(skb);
7953 }
7954 }
7955 #endif
7956
7957 static int is_network_packet(struct ipw_priv *priv,
7958 struct ieee80211_hdr_4addr *header)
7959 {
7960 	/* Filter incoming packets to determine if they are targeted toward
7961 	 * this network, discarding packets coming from ourselves */
7962 switch (priv->ieee->iw_mode) {
7963 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7964 /* packets from our adapter are dropped (echo) */
7965 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7966 return 0;
7967
7968 /* {broad,multi}cast packets to our BSSID go through */
7969 if (is_multicast_ether_addr(header->addr1))
7970 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7971
7972 /* packets to our adapter go through */
7973 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7974 ETH_ALEN);
7975
7976 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7977 /* packets from our adapter are dropped (echo) */
7978 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7979 return 0;
7980
7981 /* {broad,multi}cast packets to our BSS go through */
7982 if (is_multicast_ether_addr(header->addr1))
7983 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7984
7985 /* packets to our adapter go through */
7986 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7987 ETH_ALEN);
7988 }
7989
7990 return 1;
7991 }
7992
7993 #define IPW_PACKET_RETRY_TIME HZ
7994
7995 static int is_duplicate_packet(struct ipw_priv *priv,
7996 struct ieee80211_hdr_4addr *header)
7997 {
7998 u16 sc = le16_to_cpu(header->seq_ctl);
7999 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8000 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8001 u16 *last_seq, *last_frag;
8002 unsigned long *last_time;
8003
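	/* Track the last sequence/fragment numbers seen per transmitter
	 * (a small per-MAC hash in IBSS mode, a single slot in BSS mode)
	 * and drop frames that repeat within IPW_PACKET_RETRY_TIME. */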
8004 switch (priv->ieee->iw_mode) {
8005 case IW_MODE_ADHOC:
8006 {
8007 struct list_head *p;
8008 struct ipw_ibss_seq *entry = NULL;
8009 u8 *mac = header->addr2;
8010 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8011
8012 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8013 entry =
8014 list_entry(p, struct ipw_ibss_seq, list);
8015 if (!memcmp(entry->mac, mac, ETH_ALEN))
8016 break;
8017 }
8018 if (p == &priv->ibss_mac_hash[index]) {
8019 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8020 if (!entry) {
8021 IPW_ERROR
8022 ("Cannot malloc new mac entry\n");
8023 return 0;
8024 }
8025 memcpy(entry->mac, mac, ETH_ALEN);
8026 entry->seq_num = seq;
8027 entry->frag_num = frag;
8028 entry->packet_time = jiffies;
8029 list_add(&entry->list,
8030 &priv->ibss_mac_hash[index]);
8031 return 0;
8032 }
8033 last_seq = &entry->seq_num;
8034 last_frag = &entry->frag_num;
8035 last_time = &entry->packet_time;
8036 break;
8037 }
8038 case IW_MODE_INFRA:
8039 last_seq = &priv->last_seq_num;
8040 last_frag = &priv->last_frag_num;
8041 last_time = &priv->last_packet_time;
8042 break;
8043 default:
8044 return 0;
8045 }
8046 if ((*last_seq == seq) &&
8047 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8048 if (*last_frag == frag)
8049 goto drop;
8050 if (*last_frag + 1 != frag)
8051 /* out-of-order fragment */
8052 goto drop;
8053 } else
8054 *last_seq = seq;
8055
8056 *last_frag = frag;
8057 *last_time = jiffies;
8058 return 0;
8059
8060 drop:
8061       /* This check is commented out because we have observed the card
8062        * receiving duplicate packets without the FCTL_RETRY bit set in
8063        * IBSS mode with fragmentation enabled.
8064       BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8065 return 1;
8066 }
8067
8068 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8069 struct ipw_rx_mem_buffer *rxb,
8070 struct ieee80211_rx_stats *stats)
8071 {
8072 struct sk_buff *skb = rxb->skb;
8073 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8074 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8075 (skb->data + IPW_RX_FRAME_SIZE);
8076
8077 ieee80211_rx_mgt(priv->ieee, header, stats);
8078
8079 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8080 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8081 IEEE80211_STYPE_PROBE_RESP) ||
8082 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8083 IEEE80211_STYPE_BEACON))) {
8084 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8085 ipw_add_station(priv, header->addr2);
8086 }
8087
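	/* With statistics forwarding enabled, hand the raw ipw header and
	 * 802.11 frame (prefixed with the rx stats) up the stack as an
	 * ETH_P_80211_STATS packet; the skb then belongs to the stack. */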
8088 if (priv->config & CFG_NET_STATS) {
8089 IPW_DEBUG_HC("sending stat packet\n");
8090
8091 /* Set the size of the skb to the size of the full
8092 * ipw header and 802.11 frame */
8093 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8094 IPW_RX_FRAME_SIZE);
8095
8096 /* Advance past the ipw packet header to the 802.11 frame */
8097 skb_pull(skb, IPW_RX_FRAME_SIZE);
8098
8099 /* Push the ieee80211_rx_stats before the 802.11 frame */
8100 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8101
8102 skb->dev = priv->ieee->dev;
8103
8104 /* Point raw at the ieee80211_stats */
8105 skb->mac.raw = skb->data;
8106
8107 skb->pkt_type = PACKET_OTHERHOST;
8108 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8109 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8110 netif_rx(skb);
8111 rxb->skb = NULL;
8112 }
8113 }
8114
8115 /*
8116  * Main entry function for receiving a packet with 802.11 headers.  This
8117  * should be called whenever the FW has notified us that there is a new
8118  * skb in the receive queue.
8119 */
8120 static void ipw_rx(struct ipw_priv *priv)
8121 {
8122 struct ipw_rx_mem_buffer *rxb;
8123 struct ipw_rx_packet *pkt;
8124 struct ieee80211_hdr_4addr *header;
8125 u32 r, w, i;
8126 u8 network_packet;
8127
8128 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8129 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8130 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8131
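	/* Walk the rx ring from the entry after the last one we processed
	 * up to (but not including) the index reported in IPW_RX_READ_INDEX. */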
8132 while (i != r) {
8133 rxb = priv->rxq->queue[i];
8134 if (unlikely(rxb == NULL)) {
8135 printk(KERN_CRIT "Queue not allocated!\n");
8136 break;
8137 }
8138 priv->rxq->queue[i] = NULL;
8139
8140 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8141 IPW_RX_BUF_SIZE,
8142 PCI_DMA_FROMDEVICE);
8143
8144 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8145 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8146 pkt->header.message_type,
8147 pkt->header.rx_seq_num, pkt->header.control_bits);
8148
8149 switch (pkt->header.message_type) {
8150 case RX_FRAME_TYPE: /* 802.11 frame */ {
8151 struct ieee80211_rx_stats stats = {
8152 .rssi =
8153 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8154 IPW_RSSI_TO_DBM,
8155 .signal =
8156 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8157 IPW_RSSI_TO_DBM + 0x100,
8158 .noise =
8159 le16_to_cpu(pkt->u.frame.noise),
8160 .rate = pkt->u.frame.rate,
8161 .mac_time = jiffies,
8162 .received_channel =
8163 pkt->u.frame.received_channel,
8164 .freq =
8165 (pkt->u.frame.
8166 control & (1 << 0)) ?
8167 IEEE80211_24GHZ_BAND :
8168 IEEE80211_52GHZ_BAND,
8169 .len = le16_to_cpu(pkt->u.frame.length),
8170 };
8171
8172 if (stats.rssi != 0)
8173 stats.mask |= IEEE80211_STATMASK_RSSI;
8174 if (stats.signal != 0)
8175 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8176 if (stats.noise != 0)
8177 stats.mask |= IEEE80211_STATMASK_NOISE;
8178 if (stats.rate != 0)
8179 stats.mask |= IEEE80211_STATMASK_RATE;
8180
8181 priv->rx_packets++;
8182
8183 #ifdef CONFIG_IPW2200_PROMISCUOUS
8184 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8185 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8186 #endif
8187
8188 #ifdef CONFIG_IPW2200_MONITOR
8189 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8190 #ifdef CONFIG_IPW2200_RADIOTAP
8191
8192 ipw_handle_data_packet_monitor(priv,
8193 rxb,
8194 &stats);
8195 #else
8196 ipw_handle_data_packet(priv, rxb,
8197 &stats);
8198 #endif
8199 break;
8200 }
8201 #endif
8202
8203 header =
8204 (struct ieee80211_hdr_4addr *)(rxb->skb->
8205 data +
8206 IPW_RX_FRAME_SIZE);
8207 /* TODO: Check Ad-Hoc dest/source and make sure
8208 * that we are actually parsing these packets
8209 * correctly -- we should probably use the
8210 * frame control of the packet and disregard
8211 * the current iw_mode */
8212
8213 network_packet =
8214 is_network_packet(priv, header);
8215 if (network_packet && priv->assoc_network) {
8216 priv->assoc_network->stats.rssi =
8217 stats.rssi;
8218 priv->exp_avg_rssi =
8219 exponential_average(priv->exp_avg_rssi,
8220 stats.rssi, DEPTH_RSSI);
8221 }
8222
8223 IPW_DEBUG_RX("Frame: len=%u\n",
8224 le16_to_cpu(pkt->u.frame.length));
8225
8226 if (le16_to_cpu(pkt->u.frame.length) <
8227 ieee80211_get_hdrlen(le16_to_cpu(
8228 header->frame_ctl))) {
8229 IPW_DEBUG_DROP
8230 ("Received packet is too small. "
8231 "Dropping.\n");
8232 priv->ieee->stats.rx_errors++;
8233 priv->wstats.discard.misc++;
8234 break;
8235 }
8236
8237 switch (WLAN_FC_GET_TYPE
8238 (le16_to_cpu(header->frame_ctl))) {
8239
8240 case IEEE80211_FTYPE_MGMT:
8241 ipw_handle_mgmt_packet(priv, rxb,
8242 &stats);
8243 break;
8244
8245 case IEEE80211_FTYPE_CTL:
8246 break;
8247
8248 case IEEE80211_FTYPE_DATA:
8249 if (unlikely(!network_packet ||
8250 is_duplicate_packet(priv,
8251 header)))
8252 {
8253 IPW_DEBUG_DROP("Dropping: "
8254 MAC_FMT ", "
8255 MAC_FMT ", "
8256 MAC_FMT "\n",
8257 MAC_ARG(header->
8258 addr1),
8259 MAC_ARG(header->
8260 addr2),
8261 MAC_ARG(header->
8262 addr3));
8263 break;
8264 }
8265
8266 ipw_handle_data_packet(priv, rxb,
8267 &stats);
8268
8269 break;
8270 }
8271 break;
8272 }
8273
8274 case RX_HOST_NOTIFICATION_TYPE:{
8275 IPW_DEBUG_RX
8276 ("Notification: subtype=%02X flags=%02X size=%d\n",
8277 pkt->u.notification.subtype,
8278 pkt->u.notification.flags,
8279 pkt->u.notification.size);
8280 ipw_rx_notification(priv, &pkt->u.notification);
8281 break;
8282 }
8283
8284 default:
8285 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8286 pkt->header.message_type);
8287 break;
8288 }
8289
8290 /* For now we just don't re-use anything. We can tweak this
8291 * later to try and re-use notification packets and SKBs that
8292 * fail to Rx correctly */
8293 if (rxb->skb != NULL) {
8294 dev_kfree_skb_any(rxb->skb);
8295 rxb->skb = NULL;
8296 }
8297
8298 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8299 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8300 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8301
8302 i = (i + 1) % RX_QUEUE_SIZE;
8303 }
8304
8305 /* Backtrack one entry */
8306 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8307
8308 ipw_rx_queue_restock(priv);
8309 }
8310
8311 #define DEFAULT_RTS_THRESHOLD 2304U
8312 #define MIN_RTS_THRESHOLD 1U
8313 #define MAX_RTS_THRESHOLD 2304U
8314 #define DEFAULT_BEACON_INTERVAL 100U
8315 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8316 #define DEFAULT_LONG_RETRY_LIMIT 4U
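/* The RTS threshold is a frame length in bytes (2304 effectively disables
 * RTS/CTS) and the retry limits are attempt counts.  The beacon interval
 * presumably follows the usual 802.11 default of 100 time units. */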
8317
8318 /**
8319 * ipw_sw_reset
8320 * @option: options to control different reset behaviour
8321 * 0 = reset everything except the 'disable' module_param
8322 * 1 = reset everything and print out driver info (for probe only)
8323 * 2 = reset everything
8324 */
8325 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8326 {
8327 int band, modulation;
8328 int old_mode = priv->ieee->iw_mode;
8329
8330 /* Initialize module parameter values here */
8331 priv->config = 0;
8332
8333 /* We default to disabling the LED code as right now it causes
8334 * too many systems to lock up... */
8335 if (!led)
8336 priv->config |= CFG_NO_LED;
8337
8338 if (associate)
8339 priv->config |= CFG_ASSOCIATE;
8340 else
8341 IPW_DEBUG_INFO("Auto associate disabled.\n");
8342
8343 if (auto_create)
8344 priv->config |= CFG_ADHOC_CREATE;
8345 else
8346 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8347
8348 priv->config &= ~CFG_STATIC_ESSID;
8349 priv->essid_len = 0;
8350 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8351
8352 if (disable && option) {
8353 priv->status |= STATUS_RF_KILL_SW;
8354 IPW_DEBUG_INFO("Radio disabled.\n");
8355 }
8356
8357 if (channel != 0) {
8358 priv->config |= CFG_STATIC_CHANNEL;
8359 priv->channel = channel;
8360 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8361 /* TODO: Validate that provided channel is in range */
8362 }
8363 #ifdef CONFIG_IPW2200_QOS
8364 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8365 burst_duration_CCK, burst_duration_OFDM);
8366 #endif /* CONFIG_IPW2200_QOS */
8367
8368 switch (mode) {
8369 case 1:
8370 priv->ieee->iw_mode = IW_MODE_ADHOC;
8371 priv->net_dev->type = ARPHRD_ETHER;
8372
8373 break;
8374 #ifdef CONFIG_IPW2200_MONITOR
8375 case 2:
8376 priv->ieee->iw_mode = IW_MODE_MONITOR;
8377 #ifdef CONFIG_IPW2200_RADIOTAP
8378 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8379 #else
8380 priv->net_dev->type = ARPHRD_IEEE80211;
8381 #endif
8382 break;
8383 #endif
8384 default:
8385 case 0:
8386 priv->net_dev->type = ARPHRD_ETHER;
8387 priv->ieee->iw_mode = IW_MODE_INFRA;
8388 break;
8389 }
8390
8391 if (hwcrypto) {
8392 priv->ieee->host_encrypt = 0;
8393 priv->ieee->host_encrypt_msdu = 0;
8394 priv->ieee->host_decrypt = 0;
8395 priv->ieee->host_mc_decrypt = 0;
8396 }
8397 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8398
8399 	/* The IPW2200/2915 is able to do hardware fragmentation. */
8400 priv->ieee->host_open_frag = 0;
8401
8402 if ((priv->pci_dev->device == 0x4223) ||
8403 (priv->pci_dev->device == 0x4224)) {
8404 if (option == 1)
8405 printk(KERN_INFO DRV_NAME
8406 ": Detected Intel PRO/Wireless 2915ABG Network "
8407 "Connection\n");
8408 priv->ieee->abg_true = 1;
8409 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8410 modulation = IEEE80211_OFDM_MODULATION |
8411 IEEE80211_CCK_MODULATION;
8412 priv->adapter = IPW_2915ABG;
8413 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8414 } else {
8415 if (option == 1)
8416 printk(KERN_INFO DRV_NAME
8417 ": Detected Intel PRO/Wireless 2200BG Network "
8418 "Connection\n");
8419
8420 priv->ieee->abg_true = 0;
8421 band = IEEE80211_24GHZ_BAND;
8422 modulation = IEEE80211_OFDM_MODULATION |
8423 IEEE80211_CCK_MODULATION;
8424 priv->adapter = IPW_2200BG;
8425 priv->ieee->mode = IEEE_G | IEEE_B;
8426 }
8427
8428 priv->ieee->freq_band = band;
8429 priv->ieee->modulation = modulation;
8430
8431 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8432
8433 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8434 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8435
8436 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8437 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8438 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8439
8440 /* If power management is turned on, default to AC mode */
8441 priv->power_mode = IPW_POWER_AC;
8442 priv->tx_power = IPW_TX_POWER_DEFAULT;
8443
8444 return old_mode == priv->ieee->iw_mode;
8445 }
8446
8447 /*
8448 * This file defines the Wireless Extension handlers. It does not
8449 * define any methods of hardware manipulation and relies on the
8450 * functions defined in ipw_main to provide the HW interaction.
8451 *
8452  * The exception to this is the use of the ipw_get_ordinal()
8453  * function, used to poll the hardware rather than make unnecessary calls.
8454 *
8455 */
8456
8457 static int ipw_wx_get_name(struct net_device *dev,
8458 struct iw_request_info *info,
8459 union iwreq_data *wrqu, char *extra)
8460 {
8461 struct ipw_priv *priv = ieee80211_priv(dev);
8462 mutex_lock(&priv->mutex);
8463 if (priv->status & STATUS_RF_KILL_MASK)
8464 strcpy(wrqu->name, "radio off");
8465 else if (!(priv->status & STATUS_ASSOCIATED))
8466 strcpy(wrqu->name, "unassociated");
8467 else
8468 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8469 ipw_modes[priv->assoc_request.ieee_mode]);
8470 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8471 mutex_unlock(&priv->mutex);
8472 return 0;
8473 }
8474
8475 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8476 {
8477 if (channel == 0) {
8478 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8479 priv->config &= ~CFG_STATIC_CHANNEL;
8480 IPW_DEBUG_ASSOC("Attempting to associate with new "
8481 "parameters.\n");
8482 ipw_associate(priv);
8483 return 0;
8484 }
8485
8486 priv->config |= CFG_STATIC_CHANNEL;
8487
8488 if (priv->channel == channel) {
8489 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8490 channel);
8491 return 0;
8492 }
8493
8494 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8495 priv->channel = channel;
8496
8497 #ifdef CONFIG_IPW2200_MONITOR
8498 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8499 int i;
8500 if (priv->status & STATUS_SCANNING) {
8501 IPW_DEBUG_SCAN("Scan abort triggered due to "
8502 "channel change.\n");
8503 ipw_abort_scan(priv);
8504 }
8505
8506 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8507 udelay(10);
8508
8509 if (priv->status & STATUS_SCANNING)
8510 IPW_DEBUG_SCAN("Still scanning...\n");
8511 else
8512 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8513 1000 - i);
8514
8515 return 0;
8516 }
8517 #endif /* CONFIG_IPW2200_MONITOR */
8518
8519 /* Network configuration changed -- force [re]association */
8520 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8521 if (!ipw_disassociate(priv))
8522 ipw_associate(priv);
8523
8524 return 0;
8525 }
8526
8527 static int ipw_wx_set_freq(struct net_device *dev,
8528 struct iw_request_info *info,
8529 union iwreq_data *wrqu, char *extra)
8530 {
8531 struct ipw_priv *priv = ieee80211_priv(dev);
8532 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8533 struct iw_freq *fwrq = &wrqu->freq;
8534 int ret = 0, i;
8535 u8 channel, flags;
8536 int band;
8537
8538 if (fwrq->m == 0) {
8539 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8540 mutex_lock(&priv->mutex);
8541 ret = ipw_set_channel(priv, 0);
8542 mutex_unlock(&priv->mutex);
8543 return ret;
8544 }
8545 /* if setting by freq convert to channel */
8546 if (fwrq->e == 1) {
8547 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8548 if (channel == 0)
8549 return -EINVAL;
8550 } else
8551 channel = fwrq->m;
8552
8553 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8554 return -EINVAL;
8555
8556 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8557 i = ieee80211_channel_to_index(priv->ieee, channel);
8558 if (i == -1)
8559 return -EINVAL;
8560
8561 flags = (band == IEEE80211_24GHZ_BAND) ?
8562 geo->bg[i].flags : geo->a[i].flags;
8563 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8564 			IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive-only)\n");
8565 return -EINVAL;
8566 }
8567 }
8568
8569 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8570 mutex_lock(&priv->mutex);
8571 ret = ipw_set_channel(priv, channel);
8572 mutex_unlock(&priv->mutex);
8573 return ret;
8574 }
8575
8576 static int ipw_wx_get_freq(struct net_device *dev,
8577 struct iw_request_info *info,
8578 union iwreq_data *wrqu, char *extra)
8579 {
8580 struct ipw_priv *priv = ieee80211_priv(dev);
8581
8582 wrqu->freq.e = 0;
8583
8584 /* If we are associated, trying to associate, or have a statically
8585 * configured CHANNEL then return that; otherwise return ANY */
8586 mutex_lock(&priv->mutex);
8587 if (priv->config & CFG_STATIC_CHANNEL ||
8588 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8589 wrqu->freq.m = priv->channel;
8590 else
8591 wrqu->freq.m = 0;
8592
8593 mutex_unlock(&priv->mutex);
8594 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8595 return 0;
8596 }
8597
8598 static int ipw_wx_set_mode(struct net_device *dev,
8599 struct iw_request_info *info,
8600 union iwreq_data *wrqu, char *extra)
8601 {
8602 struct ipw_priv *priv = ieee80211_priv(dev);
8603 int err = 0;
8604
8605 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8606
8607 switch (wrqu->mode) {
8608 #ifdef CONFIG_IPW2200_MONITOR
8609 case IW_MODE_MONITOR:
8610 #endif
8611 case IW_MODE_ADHOC:
8612 case IW_MODE_INFRA:
8613 break;
8614 case IW_MODE_AUTO:
8615 wrqu->mode = IW_MODE_INFRA;
8616 break;
8617 default:
8618 return -EINVAL;
8619 }
8620 if (wrqu->mode == priv->ieee->iw_mode)
8621 return 0;
8622
8623 mutex_lock(&priv->mutex);
8624
8625 ipw_sw_reset(priv, 0);
8626
8627 #ifdef CONFIG_IPW2200_MONITOR
8628 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8629 priv->net_dev->type = ARPHRD_ETHER;
8630
8631 if (wrqu->mode == IW_MODE_MONITOR)
8632 #ifdef CONFIG_IPW2200_RADIOTAP
8633 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8634 #else
8635 priv->net_dev->type = ARPHRD_IEEE80211;
8636 #endif
8637 #endif /* CONFIG_IPW2200_MONITOR */
8638
8639 	/* Free the existing firmware and reset the fw_loaded
8640 	 * flag so ipw_load() will bring in the new firmware */
8641 free_firmware();
8642
8643 priv->ieee->iw_mode = wrqu->mode;
8644
8645 queue_work(priv->workqueue, &priv->adapter_restart);
8646 mutex_unlock(&priv->mutex);
8647 return err;
8648 }
8649
8650 static int ipw_wx_get_mode(struct net_device *dev,
8651 struct iw_request_info *info,
8652 union iwreq_data *wrqu, char *extra)
8653 {
8654 struct ipw_priv *priv = ieee80211_priv(dev);
8655 mutex_lock(&priv->mutex);
8656 wrqu->mode = priv->ieee->iw_mode;
8657 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8658 mutex_unlock(&priv->mutex);
8659 return 0;
8660 }
8661
8662 /* Values are in microseconds */
8663 static const s32 timeout_duration[] = {
8664 350000,
8665 250000,
8666 75000,
8667 37000,
8668 25000,
8669 };
8670
8671 static const s32 period_duration[] = {
8672 400000,
8673 700000,
8674 1000000,
8675 1000000,
8676 1000000
8677 };
8678
8679 static int ipw_wx_get_range(struct net_device *dev,
8680 struct iw_request_info *info,
8681 union iwreq_data *wrqu, char *extra)
8682 {
8683 struct ipw_priv *priv = ieee80211_priv(dev);
8684 struct iw_range *range = (struct iw_range *)extra;
8685 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8686 int i = 0, j;
8687
8688 wrqu->data.length = sizeof(*range);
8689 memset(range, 0, sizeof(*range));
8690
8691 	/* 54 Mb/s nominal gives roughly 27 Mb/s of real throughput (802.11g) */
8692 range->throughput = 27 * 1000 * 1000;
8693
8694 range->max_qual.qual = 100;
8695 /* TODO: Find real max RSSI and stick here */
8696 range->max_qual.level = 0;
8697 range->max_qual.noise = 0;
8698 range->max_qual.updated = 7; /* Updated all three */
8699
8700 range->avg_qual.qual = 70;
8701 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8702 range->avg_qual.level = 0; /* FIXME to real average level */
8703 range->avg_qual.noise = 0;
8704 range->avg_qual.updated = 7; /* Updated all three */
8705 mutex_lock(&priv->mutex);
8706 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8707
8708 for (i = 0; i < range->num_bitrates; i++)
8709 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8710 500000;
8711
8712 range->max_rts = DEFAULT_RTS_THRESHOLD;
8713 range->min_frag = MIN_FRAG_THRESHOLD;
8714 range->max_frag = MAX_FRAG_THRESHOLD;
8715
8716 range->encoding_size[0] = 5;
8717 range->encoding_size[1] = 13;
8718 range->num_encoding_sizes = 2;
8719 range->max_encoding_tokens = WEP_KEYS;
8720
8721 /* Set the Wireless Extension versions */
8722 range->we_version_compiled = WIRELESS_EXT;
8723 range->we_version_source = 18;
8724
8725 i = 0;
8726 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8727 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8728 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8729 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8730 continue;
8731
8732 range->freq[i].i = geo->bg[j].channel;
8733 range->freq[i].m = geo->bg[j].freq * 100000;
8734 range->freq[i].e = 1;
8735 i++;
8736 }
8737 }
8738
8739 if (priv->ieee->mode & IEEE_A) {
8740 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8741 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8742 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8743 continue;
8744
8745 range->freq[i].i = geo->a[j].channel;
8746 range->freq[i].m = geo->a[j].freq * 100000;
8747 range->freq[i].e = 1;
8748 i++;
8749 }
8750 }
8751
8752 range->num_channels = i;
8753 range->num_frequency = i;
8754
8755 mutex_unlock(&priv->mutex);
8756
8757 /* Event capability (kernel + driver) */
8758 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8759 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8760 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8761 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8762 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8763
8764 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8765 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8766
8767 IPW_DEBUG_WX("GET Range\n");
8768 return 0;
8769 }
8770
8771 static int ipw_wx_set_wap(struct net_device *dev,
8772 struct iw_request_info *info,
8773 union iwreq_data *wrqu, char *extra)
8774 {
8775 struct ipw_priv *priv = ieee80211_priv(dev);
8776
8777 static const unsigned char any[] = {
8778 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8779 };
8780 static const unsigned char off[] = {
8781 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8782 };
8783
8784 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8785 return -EINVAL;
8786 mutex_lock(&priv->mutex);
8787 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8788 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8789 /* we disable mandatory BSSID association */
8790 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8791 priv->config &= ~CFG_STATIC_BSSID;
8792 IPW_DEBUG_ASSOC("Attempting to associate with new "
8793 "parameters.\n");
8794 ipw_associate(priv);
8795 mutex_unlock(&priv->mutex);
8796 return 0;
8797 }
8798
8799 priv->config |= CFG_STATIC_BSSID;
8800 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8801 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8802 mutex_unlock(&priv->mutex);
8803 return 0;
8804 }
8805
8806 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8807 MAC_ARG(wrqu->ap_addr.sa_data));
8808
8809 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8810
8811 /* Network configuration changed -- force [re]association */
8812 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8813 if (!ipw_disassociate(priv))
8814 ipw_associate(priv);
8815
8816 mutex_unlock(&priv->mutex);
8817 return 0;
8818 }
8819
8820 static int ipw_wx_get_wap(struct net_device *dev,
8821 struct iw_request_info *info,
8822 union iwreq_data *wrqu, char *extra)
8823 {
8824 struct ipw_priv *priv = ieee80211_priv(dev);
8825 /* If we are associated, trying to associate, or have a statically
8826 * configured BSSID then return that; otherwise return ANY */
8827 mutex_lock(&priv->mutex);
8828 if (priv->config & CFG_STATIC_BSSID ||
8829 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8830 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8831 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8832 } else
8833 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8834
8835 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8836 MAC_ARG(wrqu->ap_addr.sa_data));
8837 mutex_unlock(&priv->mutex);
8838 return 0;
8839 }
8840
8841 static int ipw_wx_set_essid(struct net_device *dev,
8842 struct iw_request_info *info,
8843 union iwreq_data *wrqu, char *extra)
8844 {
8845 struct ipw_priv *priv = ieee80211_priv(dev);
8846 char *essid = ""; /* ANY */
8847 int length = 0;
8848 mutex_lock(&priv->mutex);
8849 if (wrqu->essid.flags && wrqu->essid.length) {
8850 length = wrqu->essid.length - 1;
8851 essid = extra;
8852 }
8853 if (length == 0) {
8854 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8855 if ((priv->config & CFG_STATIC_ESSID) &&
8856 !(priv->status & (STATUS_ASSOCIATED |
8857 STATUS_ASSOCIATING))) {
8858 IPW_DEBUG_ASSOC("Attempting to associate with new "
8859 "parameters.\n");
8860 priv->config &= ~CFG_STATIC_ESSID;
8861 ipw_associate(priv);
8862 }
8863 mutex_unlock(&priv->mutex);
8864 return 0;
8865 }
8866
8867 length = min(length, IW_ESSID_MAX_SIZE);
8868
8869 priv->config |= CFG_STATIC_ESSID;
8870
8871 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8872 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8873 mutex_unlock(&priv->mutex);
8874 return 0;
8875 }
8876
8877 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8878 length);
8879
8880 priv->essid_len = length;
8881 memcpy(priv->essid, essid, priv->essid_len);
8882
8883 /* Network configuration changed -- force [re]association */
8884 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8885 if (!ipw_disassociate(priv))
8886 ipw_associate(priv);
8887
8888 mutex_unlock(&priv->mutex);
8889 return 0;
8890 }
8891
8892 static int ipw_wx_get_essid(struct net_device *dev,
8893 struct iw_request_info *info,
8894 union iwreq_data *wrqu, char *extra)
8895 {
8896 struct ipw_priv *priv = ieee80211_priv(dev);
8897
8898 /* If we are associated, trying to associate, or have a statically
8899 * configured ESSID then return that; otherwise return ANY */
8900 mutex_lock(&priv->mutex);
8901 if (priv->config & CFG_STATIC_ESSID ||
8902 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8903 IPW_DEBUG_WX("Getting essid: '%s'\n",
8904 escape_essid(priv->essid, priv->essid_len));
8905 memcpy(extra, priv->essid, priv->essid_len);
8906 wrqu->essid.length = priv->essid_len;
8907 wrqu->essid.flags = 1; /* active */
8908 } else {
8909 IPW_DEBUG_WX("Getting essid: ANY\n");
8910 wrqu->essid.length = 0;
8911 wrqu->essid.flags = 0; /* active */
8912 }
8913 mutex_unlock(&priv->mutex);
8914 return 0;
8915 }
8916
8917 static int ipw_wx_set_nick(struct net_device *dev,
8918 struct iw_request_info *info,
8919 union iwreq_data *wrqu, char *extra)
8920 {
8921 struct ipw_priv *priv = ieee80211_priv(dev);
8922
8923 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8924 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8925 return -E2BIG;
8926 mutex_lock(&priv->mutex);
8927 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8928 memset(priv->nick, 0, sizeof(priv->nick));
8929 memcpy(priv->nick, extra, wrqu->data.length);
8930 IPW_DEBUG_TRACE("<<\n");
8931 mutex_unlock(&priv->mutex);
8932 return 0;
8933
8934 }
8935
8936 static int ipw_wx_get_nick(struct net_device *dev,
8937 struct iw_request_info *info,
8938 union iwreq_data *wrqu, char *extra)
8939 {
8940 struct ipw_priv *priv = ieee80211_priv(dev);
8941 IPW_DEBUG_WX("Getting nick\n");
8942 mutex_lock(&priv->mutex);
8943 wrqu->data.length = strlen(priv->nick) + 1;
8944 memcpy(extra, priv->nick, wrqu->data.length);
8945 wrqu->data.flags = 1; /* active */
8946 mutex_unlock(&priv->mutex);
8947 return 0;
8948 }
8949
8950 static int ipw_wx_set_sens(struct net_device *dev,
8951 struct iw_request_info *info,
8952 union iwreq_data *wrqu, char *extra)
8953 {
8954 struct ipw_priv *priv = ieee80211_priv(dev);
8955 int err = 0;
8956
8957 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8958 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8959 mutex_lock(&priv->mutex);
8960
8961 if (wrqu->sens.fixed == 0)
8962 {
8963 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8964 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8965 goto out;
8966 }
8967 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8968 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8969 err = -EINVAL;
8970 goto out;
8971 }
8972
8973 priv->roaming_threshold = wrqu->sens.value;
8974 priv->disassociate_threshold = 3*wrqu->sens.value;
8975 out:
8976 mutex_unlock(&priv->mutex);
8977 return err;
8978 }
8979
8980 static int ipw_wx_get_sens(struct net_device *dev,
8981 struct iw_request_info *info,
8982 union iwreq_data *wrqu, char *extra)
8983 {
8984 struct ipw_priv *priv = ieee80211_priv(dev);
8985 mutex_lock(&priv->mutex);
8986 wrqu->sens.fixed = 1;
8987 wrqu->sens.value = priv->roaming_threshold;
8988 mutex_unlock(&priv->mutex);
8989
8990 	IPW_DEBUG_WX("GET roaming threshold -> %d \n",
8991 		     wrqu->sens.value);
8992
8993 return 0;
8994 }
8995
8996 static int ipw_wx_set_rate(struct net_device *dev,
8997 struct iw_request_info *info,
8998 union iwreq_data *wrqu, char *extra)
8999 {
9000 /* TODO: We should use semaphores or locks for access to priv */
9001 struct ipw_priv *priv = ieee80211_priv(dev);
9002 u32 target_rate = wrqu->bitrate.value;
9003 u32 fixed, mask;
9004
9005 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9006 /* value = X, fixed = 1 means only rate X */
9007 	/* value = X, fixed = 0 means all rates lower than or equal to X */
9008
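	/* Example: value = 11000000 with fixed = 0 builds a mask of the
	 * 1, 2, 5.5, 6, 9 and 11 Mb/s rates; with fixed = 1 only the
	 * 11 Mb/s CCK rate is selected. */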
9009 if (target_rate == -1) {
9010 fixed = 0;
9011 mask = IEEE80211_DEFAULT_RATES_MASK;
9012 /* Now we should reassociate */
9013 goto apply;
9014 }
9015
9016 mask = 0;
9017 fixed = wrqu->bitrate.fixed;
9018
9019 if (target_rate == 1000000 || !fixed)
9020 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9021 if (target_rate == 1000000)
9022 goto apply;
9023
9024 if (target_rate == 2000000 || !fixed)
9025 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9026 if (target_rate == 2000000)
9027 goto apply;
9028
9029 if (target_rate == 5500000 || !fixed)
9030 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9031 if (target_rate == 5500000)
9032 goto apply;
9033
9034 if (target_rate == 6000000 || !fixed)
9035 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9036 if (target_rate == 6000000)
9037 goto apply;
9038
9039 if (target_rate == 9000000 || !fixed)
9040 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9041 if (target_rate == 9000000)
9042 goto apply;
9043
9044 if (target_rate == 11000000 || !fixed)
9045 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9046 if (target_rate == 11000000)
9047 goto apply;
9048
9049 if (target_rate == 12000000 || !fixed)
9050 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9051 if (target_rate == 12000000)
9052 goto apply;
9053
9054 if (target_rate == 18000000 || !fixed)
9055 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9056 if (target_rate == 18000000)
9057 goto apply;
9058
9059 if (target_rate == 24000000 || !fixed)
9060 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9061 if (target_rate == 24000000)
9062 goto apply;
9063
9064 if (target_rate == 36000000 || !fixed)
9065 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9066 if (target_rate == 36000000)
9067 goto apply;
9068
9069 if (target_rate == 48000000 || !fixed)
9070 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9071 if (target_rate == 48000000)
9072 goto apply;
9073
9074 if (target_rate == 54000000 || !fixed)
9075 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9076 if (target_rate == 54000000)
9077 goto apply;
9078
9079 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9080 return -EINVAL;
9081
9082 apply:
9083 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9084 mask, fixed ? "fixed" : "sub-rates");
9085 mutex_lock(&priv->mutex);
9086 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9087 priv->config &= ~CFG_FIXED_RATE;
9088 ipw_set_fixed_rate(priv, priv->ieee->mode);
9089 } else
9090 priv->config |= CFG_FIXED_RATE;
9091
9092 if (priv->rates_mask == mask) {
9093 IPW_DEBUG_WX("Mask set to current mask.\n");
9094 mutex_unlock(&priv->mutex);
9095 return 0;
9096 }
9097
9098 priv->rates_mask = mask;
9099
9100 /* Network configuration changed -- force [re]association */
9101 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9102 if (!ipw_disassociate(priv))
9103 ipw_associate(priv);
9104
9105 mutex_unlock(&priv->mutex);
9106 return 0;
9107 }
9108
9109 static int ipw_wx_get_rate(struct net_device *dev,
9110 struct iw_request_info *info,
9111 union iwreq_data *wrqu, char *extra)
9112 {
9113 struct ipw_priv *priv = ieee80211_priv(dev);
9114 mutex_lock(&priv->mutex);
9115 wrqu->bitrate.value = priv->last_rate;
9116 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9117 mutex_unlock(&priv->mutex);
9118 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9119 return 0;
9120 }
9121
9122 static int ipw_wx_set_rts(struct net_device *dev,
9123 struct iw_request_info *info,
9124 union iwreq_data *wrqu, char *extra)
9125 {
9126 struct ipw_priv *priv = ieee80211_priv(dev);
9127 mutex_lock(&priv->mutex);
9128 if (wrqu->rts.disabled)
9129 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9130 else {
9131 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9132 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9133 mutex_unlock(&priv->mutex);
9134 return -EINVAL;
9135 }
9136 priv->rts_threshold = wrqu->rts.value;
9137 }
9138
9139 ipw_send_rts_threshold(priv, priv->rts_threshold);
9140 mutex_unlock(&priv->mutex);
9141 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9142 return 0;
9143 }
9144
9145 static int ipw_wx_get_rts(struct net_device *dev,
9146 struct iw_request_info *info,
9147 union iwreq_data *wrqu, char *extra)
9148 {
9149 struct ipw_priv *priv = ieee80211_priv(dev);
9150 mutex_lock(&priv->mutex);
9151 wrqu->rts.value = priv->rts_threshold;
9152 wrqu->rts.fixed = 0; /* no auto select */
9153 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9154 mutex_unlock(&priv->mutex);
9155 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9156 return 0;
9157 }
9158
9159 static int ipw_wx_set_txpow(struct net_device *dev,
9160 struct iw_request_info *info,
9161 union iwreq_data *wrqu, char *extra)
9162 {
9163 struct ipw_priv *priv = ieee80211_priv(dev);
9164 int err = 0;
9165
9166 mutex_lock(&priv->mutex);
9167 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9168 err = -EINPROGRESS;
9169 goto out;
9170 }
9171
9172 if (!wrqu->power.fixed)
9173 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9174
9175 if (wrqu->power.flags != IW_TXPOW_DBM) {
9176 err = -EINVAL;
9177 goto out;
9178 }
9179
9180 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9181 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9182 err = -EINVAL;
9183 goto out;
9184 }
9185
9186 priv->tx_power = wrqu->power.value;
9187 err = ipw_set_tx_power(priv);
9188 out:
9189 mutex_unlock(&priv->mutex);
9190 return err;
9191 }
9192
9193 static int ipw_wx_get_txpow(struct net_device *dev,
9194 struct iw_request_info *info,
9195 union iwreq_data *wrqu, char *extra)
9196 {
9197 struct ipw_priv *priv = ieee80211_priv(dev);
9198 mutex_lock(&priv->mutex);
9199 wrqu->power.value = priv->tx_power;
9200 wrqu->power.fixed = 1;
9201 wrqu->power.flags = IW_TXPOW_DBM;
9202 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9203 mutex_unlock(&priv->mutex);
9204
9205 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9206 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9207
9208 return 0;
9209 }
9210
9211 static int ipw_wx_set_frag(struct net_device *dev,
9212 struct iw_request_info *info,
9213 union iwreq_data *wrqu, char *extra)
9214 {
9215 struct ipw_priv *priv = ieee80211_priv(dev);
9216 mutex_lock(&priv->mutex);
9217 if (wrqu->frag.disabled)
9218 priv->ieee->fts = DEFAULT_FTS;
9219 else {
9220 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9221 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9222 mutex_unlock(&priv->mutex);
9223 return -EINVAL;
9224 }
9225
9226 priv->ieee->fts = wrqu->frag.value & ~0x1;
9227 }
9228
9229 ipw_send_frag_threshold(priv, wrqu->frag.value);
9230 mutex_unlock(&priv->mutex);
9231 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9232 return 0;
9233 }
9234
9235 static int ipw_wx_get_frag(struct net_device *dev,
9236 struct iw_request_info *info,
9237 union iwreq_data *wrqu, char *extra)
9238 {
9239 struct ipw_priv *priv = ieee80211_priv(dev);
9240 mutex_lock(&priv->mutex);
9241 wrqu->frag.value = priv->ieee->fts;
9242 wrqu->frag.fixed = 0; /* no auto select */
9243 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9244 mutex_unlock(&priv->mutex);
9245 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9246
9247 return 0;
9248 }
9249
9250 static int ipw_wx_set_retry(struct net_device *dev,
9251 struct iw_request_info *info,
9252 union iwreq_data *wrqu, char *extra)
9253 {
9254 struct ipw_priv *priv = ieee80211_priv(dev);
9255
9256 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9257 return -EINVAL;
9258
9259 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9260 return 0;
9261
9262 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
9263 return -EINVAL;
9264
9265 mutex_lock(&priv->mutex);
9266 if (wrqu->retry.flags & IW_RETRY_MIN)
9267 priv->short_retry_limit = (u8) wrqu->retry.value;
9268 else if (wrqu->retry.flags & IW_RETRY_MAX)
9269 priv->long_retry_limit = (u8) wrqu->retry.value;
9270 else {
9271 priv->short_retry_limit = (u8) wrqu->retry.value;
9272 priv->long_retry_limit = (u8) wrqu->retry.value;
9273 }
9274
9275 ipw_send_retry_limit(priv, priv->short_retry_limit,
9276 priv->long_retry_limit);
9277 mutex_unlock(&priv->mutex);
9278 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9279 priv->short_retry_limit, priv->long_retry_limit);
9280 return 0;
9281 }
9282
9283 static int ipw_wx_get_retry(struct net_device *dev,
9284 struct iw_request_info *info,
9285 union iwreq_data *wrqu, char *extra)
9286 {
9287 struct ipw_priv *priv = ieee80211_priv(dev);
9288
9289 mutex_lock(&priv->mutex);
9290 wrqu->retry.disabled = 0;
9291
9292 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9293 mutex_unlock(&priv->mutex);
9294 return -EINVAL;
9295 }
9296
9297 if (wrqu->retry.flags & IW_RETRY_MAX) {
9298 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
9299 wrqu->retry.value = priv->long_retry_limit;
9300 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
9301 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
9302 wrqu->retry.value = priv->short_retry_limit;
9303 } else {
9304 wrqu->retry.flags = IW_RETRY_LIMIT;
9305 wrqu->retry.value = priv->short_retry_limit;
9306 }
9307 mutex_unlock(&priv->mutex);
9308
9309 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9310
9311 return 0;
9312 }
9313
9314 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9315 int essid_len)
9316 {
9317 struct ipw_scan_request_ext scan;
9318 int err = 0, scan_type;
9319
9320 if (!(priv->status & STATUS_INIT) ||
9321 (priv->status & STATUS_EXIT_PENDING))
9322 return 0;
9323
9324 mutex_lock(&priv->mutex);
9325
9326 if (priv->status & STATUS_RF_KILL_MASK) {
9327 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9328 priv->status |= STATUS_SCAN_PENDING;
9329 goto done;
9330 }
9331
9332 IPW_DEBUG_HC("starting request direct scan!\n");
9333
9334 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9335 /* We should not sleep here; otherwise we will block most
9336 * of the system (for instance, we hold rtnl_lock when we
9337 * get here).
9338 */
9339 err = -EAGAIN;
9340 goto done;
9341 }
9342 memset(&scan, 0, sizeof(scan));
9343
9344 if (priv->config & CFG_SPEED_SCAN)
9345 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9346 cpu_to_le16(30);
9347 else
9348 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9349 cpu_to_le16(20);
9350
9351 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9352 cpu_to_le16(20);
9353 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9354 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9355
9356 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9357
9358 err = ipw_send_ssid(priv, essid, essid_len);
9359 if (err) {
9360 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9361 goto done;
9362 }
9363 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9364
9365 ipw_add_scan_channels(priv, &scan, scan_type);
9366
9367 err = ipw_send_scan_request_ext(priv, &scan);
9368 if (err) {
9369 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9370 goto done;
9371 }
9372
9373 priv->status |= STATUS_SCANNING;
9374
9375 done:
9376 mutex_unlock(&priv->mutex);
9377 return err;
9378 }
9379
9380 static int ipw_wx_set_scan(struct net_device *dev,
9381 struct iw_request_info *info,
9382 union iwreq_data *wrqu, char *extra)
9383 {
9384 struct ipw_priv *priv = ieee80211_priv(dev);
9385 struct iw_scan_req *req = NULL;
9386 if (wrqu->data.length
9387 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9388 req = (struct iw_scan_req *)extra;
9389 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9390 ipw_request_direct_scan(priv, req->essid,
9391 req->essid_len);
9392 return 0;
9393 }
9394 }
9395
9396 IPW_DEBUG_WX("Start scan\n");
9397
9398 queue_work(priv->workqueue, &priv->request_scan);
9399
9400 return 0;
9401 }
9402
9403 static int ipw_wx_get_scan(struct net_device *dev,
9404 struct iw_request_info *info,
9405 union iwreq_data *wrqu, char *extra)
9406 {
9407 struct ipw_priv *priv = ieee80211_priv(dev);
9408 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9409 }
9410
9411 static int ipw_wx_set_encode(struct net_device *dev,
9412 struct iw_request_info *info,
9413 union iwreq_data *wrqu, char *key)
9414 {
9415 struct ipw_priv *priv = ieee80211_priv(dev);
9416 int ret;
9417 u32 cap = priv->capability;
9418
9419 mutex_lock(&priv->mutex);
9420 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9421
9422 /* In IBSS mode, we need to notify the firmware to update
9423 * the beacon info after we changed the capability. */
9424 if (cap != priv->capability &&
9425 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9426 priv->status & STATUS_ASSOCIATED)
9427 ipw_disassociate(priv);
9428
9429 mutex_unlock(&priv->mutex);
9430 return ret;
9431 }
9432
9433 static int ipw_wx_get_encode(struct net_device *dev,
9434 struct iw_request_info *info,
9435 union iwreq_data *wrqu, char *key)
9436 {
9437 struct ipw_priv *priv = ieee80211_priv(dev);
9438 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9439 }
9440
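/*
 * SIOCSIWPOWER handler notes: "disabled" switches the firmware to CAM
 * (continuously aware) mode; otherwise only the IW_POWER_ON, IW_POWER_MODE
 * and IW_POWER_ALL_R flag variants are accepted, and a current level of
 * IPW_POWER_AC is bumped to IPW_POWER_BATTERY as the default power-save level
 * before the mode is sent to the firmware.
 */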
9441 static int ipw_wx_set_power(struct net_device *dev,
9442 struct iw_request_info *info,
9443 union iwreq_data *wrqu, char *extra)
9444 {
9445 struct ipw_priv *priv = ieee80211_priv(dev);
9446 int err;
9447 mutex_lock(&priv->mutex);
9448 if (wrqu->power.disabled) {
9449 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9450 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9451 if (err) {
9452 IPW_DEBUG_WX("failed setting power mode.\n");
9453 mutex_unlock(&priv->mutex);
9454 return err;
9455 }
9456 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9457 mutex_unlock(&priv->mutex);
9458 return 0;
9459 }
9460
9461 switch (wrqu->power.flags & IW_POWER_MODE) {
9462 case IW_POWER_ON: /* If not specified */
9463 case IW_POWER_MODE: /* If set all mask */
9464 	case IW_POWER_ALL_R:	/* If explicitly stated all */
9465 break;
9466 default: /* Otherwise we don't support it */
9467 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9468 wrqu->power.flags);
9469 mutex_unlock(&priv->mutex);
9470 return -EOPNOTSUPP;
9471 }
9472
9473 /* If the user hasn't specified a power management mode yet, default
9474 * to BATTERY */
9475 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9476 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9477 else
9478 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9479 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9480 if (err) {
9481 IPW_DEBUG_WX("failed setting power mode.\n");
9482 mutex_unlock(&priv->mutex);
9483 return err;
9484 }
9485
9486 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9487 mutex_unlock(&priv->mutex);
9488 return 0;
9489 }
9490
9491 static int ipw_wx_get_power(struct net_device *dev,
9492 struct iw_request_info *info,
9493 union iwreq_data *wrqu, char *extra)
9494 {
9495 struct ipw_priv *priv = ieee80211_priv(dev);
9496 mutex_lock(&priv->mutex);
9497 if (!(priv->power_mode & IPW_POWER_ENABLED))
9498 wrqu->power.disabled = 1;
9499 else
9500 wrqu->power.disabled = 0;
9501
9502 mutex_unlock(&priv->mutex);
9503 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9504
9505 return 0;
9506 }
9507
9508 static int ipw_wx_set_powermode(struct net_device *dev,
9509 struct iw_request_info *info,
9510 union iwreq_data *wrqu, char *extra)
9511 {
9512 struct ipw_priv *priv = ieee80211_priv(dev);
9513 int mode = *(int *)extra;
9514 int err;
9515 mutex_lock(&priv->mutex);
9516 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9517 mode = IPW_POWER_AC;
9518 priv->power_mode = mode;
9519 } else {
9520 priv->power_mode = IPW_POWER_ENABLED | mode;
9521 }
9522
9523 if (priv->power_mode != mode) {
9524 err = ipw_send_power_mode(priv, mode);
9525
9526 if (err) {
9527 IPW_DEBUG_WX("failed setting power mode.\n");
9528 mutex_unlock(&priv->mutex);
9529 return err;
9530 }
9531 }
9532 mutex_unlock(&priv->mutex);
9533 return 0;
9534 }
9535
9536 #define MAX_WX_STRING 80
9537 static int ipw_wx_get_powermode(struct net_device *dev,
9538 struct iw_request_info *info,
9539 union iwreq_data *wrqu, char *extra)
9540 {
9541 struct ipw_priv *priv = ieee80211_priv(dev);
9542 int level = IPW_POWER_LEVEL(priv->power_mode);
9543 char *p = extra;
9544
9545 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9546
9547 switch (level) {
9548 case IPW_POWER_AC:
9549 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9550 break;
9551 case IPW_POWER_BATTERY:
9552 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9553 break;
9554 default:
9555 p += snprintf(p, MAX_WX_STRING - (p - extra),
9556 "(Timeout %dms, Period %dms)",
9557 timeout_duration[level - 1] / 1000,
9558 period_duration[level - 1] / 1000);
9559 }
9560
9561 if (!(priv->power_mode & IPW_POWER_ENABLED))
9562 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9563
9564 wrqu->data.length = p - extra + 1;
9565
9566 return 0;
9567 }
9568
9569 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9570 struct iw_request_info *info,
9571 union iwreq_data *wrqu, char *extra)
9572 {
9573 struct ipw_priv *priv = ieee80211_priv(dev);
9574 int mode = *(int *)extra;
9575 u8 band = 0, modulation = 0;
9576
9577 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9578 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9579 return -EINVAL;
9580 }
9581 mutex_lock(&priv->mutex);
9582 if (priv->adapter == IPW_2915ABG) {
9583 priv->ieee->abg_true = 1;
9584 if (mode & IEEE_A) {
9585 band |= IEEE80211_52GHZ_BAND;
9586 modulation |= IEEE80211_OFDM_MODULATION;
9587 } else
9588 priv->ieee->abg_true = 0;
9589 } else {
9590 if (mode & IEEE_A) {
9591 IPW_WARNING("Attempt to set 2200BG into "
9592 "802.11a mode\n");
9593 mutex_unlock(&priv->mutex);
9594 return -EINVAL;
9595 }
9596
9597 priv->ieee->abg_true = 0;
9598 }
9599
9600 if (mode & IEEE_B) {
9601 band |= IEEE80211_24GHZ_BAND;
9602 modulation |= IEEE80211_CCK_MODULATION;
9603 } else
9604 priv->ieee->abg_true = 0;
9605
9606 if (mode & IEEE_G) {
9607 band |= IEEE80211_24GHZ_BAND;
9608 modulation |= IEEE80211_OFDM_MODULATION;
9609 } else
9610 priv->ieee->abg_true = 0;
9611
9612 priv->ieee->mode = mode;
9613 priv->ieee->freq_band = band;
9614 priv->ieee->modulation = modulation;
9615 init_supported_rates(priv, &priv->rates);
9616
9617 /* Network configuration changed -- force [re]association */
9618 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9619 if (!ipw_disassociate(priv)) {
9620 ipw_send_supported_rates(priv, &priv->rates);
9621 ipw_associate(priv);
9622 }
9623
9624 /* Update the band LEDs */
9625 ipw_led_band_on(priv);
9626
9627 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9628 mode & IEEE_A ? 'a' : '.',
9629 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9630 mutex_unlock(&priv->mutex);
9631 return 0;
9632 }
9633
9634 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9635 struct iw_request_info *info,
9636 union iwreq_data *wrqu, char *extra)
9637 {
9638 struct ipw_priv *priv = ieee80211_priv(dev);
9639 mutex_lock(&priv->mutex);
9640 switch (priv->ieee->mode) {
9641 case IEEE_A:
9642 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9643 break;
9644 case IEEE_B:
9645 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9646 break;
9647 case IEEE_A | IEEE_B:
9648 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9649 break;
9650 case IEEE_G:
9651 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9652 break;
9653 case IEEE_A | IEEE_G:
9654 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9655 break;
9656 case IEEE_B | IEEE_G:
9657 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9658 break;
9659 case IEEE_A | IEEE_B | IEEE_G:
9660 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9661 break;
9662 default:
9663 strncpy(extra, "unknown", MAX_WX_STRING);
9664 break;
9665 }
9666
9667 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9668
9669 wrqu->data.length = strlen(extra) + 1;
9670 mutex_unlock(&priv->mutex);
9671
9672 return 0;
9673 }
9674
9675 static int ipw_wx_set_preamble(struct net_device *dev,
9676 struct iw_request_info *info,
9677 union iwreq_data *wrqu, char *extra)
9678 {
9679 struct ipw_priv *priv = ieee80211_priv(dev);
9680 int mode = *(int *)extra;
9681 mutex_lock(&priv->mutex);
9682 /* Switching from SHORT -> LONG requires a disassociation */
9683 if (mode == 1) {
9684 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9685 priv->config |= CFG_PREAMBLE_LONG;
9686
9687 /* Network configuration changed -- force [re]association */
9688 IPW_DEBUG_ASSOC
9689 ("[re]association triggered due to preamble change.\n");
9690 if (!ipw_disassociate(priv))
9691 ipw_associate(priv);
9692 }
9693 goto done;
9694 }
9695
9696 if (mode == 0) {
9697 priv->config &= ~CFG_PREAMBLE_LONG;
9698 goto done;
9699 }
9700 mutex_unlock(&priv->mutex);
9701 return -EINVAL;
9702
9703 done:
9704 mutex_unlock(&priv->mutex);
9705 return 0;
9706 }
9707
9708 static int ipw_wx_get_preamble(struct net_device *dev,
9709 struct iw_request_info *info,
9710 union iwreq_data *wrqu, char *extra)
9711 {
9712 struct ipw_priv *priv = ieee80211_priv(dev);
9713 mutex_lock(&priv->mutex);
9714 if (priv->config & CFG_PREAMBLE_LONG)
9715 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9716 else
9717 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9718 mutex_unlock(&priv->mutex);
9719 return 0;
9720 }
9721
9722 #ifdef CONFIG_IPW2200_MONITOR
9723 static int ipw_wx_set_monitor(struct net_device *dev,
9724 struct iw_request_info *info,
9725 union iwreq_data *wrqu, char *extra)
9726 {
9727 struct ipw_priv *priv = ieee80211_priv(dev);
9728 int *parms = (int *)extra;
9729 int enable = (parms[0] > 0);
9730 mutex_lock(&priv->mutex);
9731 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9732 if (enable) {
9733 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9734 #ifdef CONFIG_IPW2200_RADIOTAP
9735 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9736 #else
9737 priv->net_dev->type = ARPHRD_IEEE80211;
9738 #endif
9739 queue_work(priv->workqueue, &priv->adapter_restart);
9740 }
9741
9742 ipw_set_channel(priv, parms[1]);
9743 } else {
9744 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9745 mutex_unlock(&priv->mutex);
9746 return 0;
9747 }
9748 priv->net_dev->type = ARPHRD_ETHER;
9749 queue_work(priv->workqueue, &priv->adapter_restart);
9750 }
9751 mutex_unlock(&priv->mutex);
9752 return 0;
9753 }
9754
9755 #endif				/* CONFIG_IPW2200_MONITOR */
9756
9757 static int ipw_wx_reset(struct net_device *dev,
9758 struct iw_request_info *info,
9759 union iwreq_data *wrqu, char *extra)
9760 {
9761 struct ipw_priv *priv = ieee80211_priv(dev);
9762 IPW_DEBUG_WX("RESET\n");
9763 queue_work(priv->workqueue, &priv->adapter_restart);
9764 return 0;
9765 }
9766
9767 static int ipw_wx_sw_reset(struct net_device *dev,
9768 struct iw_request_info *info,
9769 union iwreq_data *wrqu, char *extra)
9770 {
9771 struct ipw_priv *priv = ieee80211_priv(dev);
9772 union iwreq_data wrqu_sec = {
9773 .encoding = {
9774 .flags = IW_ENCODE_DISABLED,
9775 },
9776 };
9777 int ret;
9778
9779 IPW_DEBUG_WX("SW_RESET\n");
9780
9781 mutex_lock(&priv->mutex);
9782
9783 ret = ipw_sw_reset(priv, 2);
9784 if (!ret) {
9785 free_firmware();
9786 ipw_adapter_restart(priv);
9787 }
9788
9789 /* The SW reset bit might have been toggled on by the 'disable'
9790 * module parameter, so take appropriate action */
9791 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9792
9793 mutex_unlock(&priv->mutex);
9794 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9795 mutex_lock(&priv->mutex);
9796
9797 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9798 /* Configuration likely changed -- force [re]association */
9799 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9800 "reset.\n");
9801 if (!ipw_disassociate(priv))
9802 ipw_associate(priv);
9803 }
9804
9805 mutex_unlock(&priv->mutex);
9806
9807 return 0;
9808 }
9809
9810 /* Rebase the WE IOCTLs to zero for the handler array */
9811 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
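/*
 * For example, IW_IOCTL(SIOCGIWNAME) expands to [SIOCGIWNAME - SIOCSIWCOMMIT],
 * so each standard Wireless Extensions ioctl lands at its offset from
 * SIOCSIWCOMMIT and the handler array below stays densely indexed from zero.
 */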
9812 static iw_handler ipw_wx_handlers[] = {
9813 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9814 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9815 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9816 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9817 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9818 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9819 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9820 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9821 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9822 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9823 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9824 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9825 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9826 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9827 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9828 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9829 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9830 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9831 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9832 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9833 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9834 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9835 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9836 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9837 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9838 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9839 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9840 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9841 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9842 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9843 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9844 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9845 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9846 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9847 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9848 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9849 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9850 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9851 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9852 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9853 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9854 };
9855
9856 enum {
9857 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9858 IPW_PRIV_GET_POWER,
9859 IPW_PRIV_SET_MODE,
9860 IPW_PRIV_GET_MODE,
9861 IPW_PRIV_SET_PREAMBLE,
9862 IPW_PRIV_GET_PREAMBLE,
9863 IPW_PRIV_RESET,
9864 IPW_PRIV_SW_RESET,
9865 #ifdef CONFIG_IPW2200_MONITOR
9866 IPW_PRIV_SET_MONITOR,
9867 #endif
9868 };
9869
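/*
 * The ipw_priv_args table and ipw_priv_handler[] below are kept in the same
 * order as this enum; the Wireless Extensions core is expected to dispatch a
 * private ioctl by its offset from SIOCIWFIRSTPRIV, so reordering one without
 * the other would break the private commands.
 */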
9870 static struct iw_priv_args ipw_priv_args[] = {
9871 {
9872 .cmd = IPW_PRIV_SET_POWER,
9873 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9874 .name = "set_power"},
9875 {
9876 .cmd = IPW_PRIV_GET_POWER,
9877 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9878 .name = "get_power"},
9879 {
9880 .cmd = IPW_PRIV_SET_MODE,
9881 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9882 .name = "set_mode"},
9883 {
9884 .cmd = IPW_PRIV_GET_MODE,
9885 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9886 .name = "get_mode"},
9887 {
9888 .cmd = IPW_PRIV_SET_PREAMBLE,
9889 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9890 .name = "set_preamble"},
9891 {
9892 .cmd = IPW_PRIV_GET_PREAMBLE,
9893 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9894 .name = "get_preamble"},
9895 {
9896 IPW_PRIV_RESET,
9897 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9898 {
9899 IPW_PRIV_SW_RESET,
9900 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9901 #ifdef CONFIG_IPW2200_MONITOR
9902 {
9903 IPW_PRIV_SET_MONITOR,
9904 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9905 #endif /* CONFIG_IPW2200_MONITOR */
9906 };
9907
9908 static iw_handler ipw_priv_handler[] = {
9909 ipw_wx_set_powermode,
9910 ipw_wx_get_powermode,
9911 ipw_wx_set_wireless_mode,
9912 ipw_wx_get_wireless_mode,
9913 ipw_wx_set_preamble,
9914 ipw_wx_get_preamble,
9915 ipw_wx_reset,
9916 ipw_wx_sw_reset,
9917 #ifdef CONFIG_IPW2200_MONITOR
9918 ipw_wx_set_monitor,
9919 #endif
9920 };
9921
9922 static struct iw_handler_def ipw_wx_handler_def = {
9923 .standard = ipw_wx_handlers,
9924 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9925 .num_private = ARRAY_SIZE(ipw_priv_handler),
9926 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9927 .private = ipw_priv_handler,
9928 .private_args = ipw_priv_args,
9929 .get_wireless_stats = ipw_get_wireless_stats,
9930 };
9931
9932 /*
9933 * Get wireless statistics.
9934 * Called by /proc/net/wireless
9935 * Also called by SIOCGIWSTATS
9936 */
9937 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9938 {
9939 struct ipw_priv *priv = ieee80211_priv(dev);
9940 struct iw_statistics *wstats;
9941
9942 wstats = &priv->wstats;
9943
9944 	/* if hw is disabled, then ipw_get_ordinal() can't be called.
9945 	 * netdev->get_wireless_stats seems to be called before fw is
9946 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
9947 	 * and associated; if not associated, the values are all meaningless
9948 	 * anyway, so zero them and mark them INVALID */
9949 if (!(priv->status & STATUS_ASSOCIATED)) {
9950 wstats->miss.beacon = 0;
9951 wstats->discard.retries = 0;
9952 wstats->qual.qual = 0;
9953 wstats->qual.level = 0;
9954 wstats->qual.noise = 0;
9955 wstats->qual.updated = 7;
9956 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9957 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9958 return wstats;
9959 }
9960
9961 wstats->qual.qual = priv->quality;
9962 wstats->qual.level = priv->exp_avg_rssi;
9963 wstats->qual.noise = priv->exp_avg_noise;
9964 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9965 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9966
9967 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9968 wstats->discard.retries = priv->last_tx_failures;
9969 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9970
9971 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9972 goto fail_get_ordinal;
9973 wstats->discard.retries += tx_retry; */
9974
9975 return wstats;
9976 }
9977
9978 /* net device stuff */
9979
9980 static void init_sys_config(struct ipw_sys_config *sys_config)
9981 {
9982 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9983 sys_config->bt_coexistence = 0;
9984 sys_config->answer_broadcast_ssid_probe = 0;
9985 sys_config->accept_all_data_frames = 0;
9986 sys_config->accept_non_directed_frames = 1;
9987 sys_config->exclude_unicast_unencrypted = 0;
9988 sys_config->disable_unicast_decryption = 1;
9989 sys_config->exclude_multicast_unencrypted = 0;
9990 sys_config->disable_multicast_decryption = 1;
9991 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
9992 antenna = CFG_SYS_ANTENNA_BOTH;
9993 sys_config->antenna_diversity = antenna;
9994 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9995 sys_config->dot11g_auto_detection = 0;
9996 sys_config->enable_cts_to_self = 0;
9997 sys_config->bt_coexist_collision_thr = 0;
9998 	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
9999 sys_config->silence_threshold = 0x1e;
10000 }
10001
10002 static int ipw_net_open(struct net_device *dev)
10003 {
10004 struct ipw_priv *priv = ieee80211_priv(dev);
10005 IPW_DEBUG_INFO("dev->open\n");
10006 /* we should be verifying the device is ready to be opened */
10007 mutex_lock(&priv->mutex);
10008 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10009 (priv->status & STATUS_ASSOCIATED))
10010 netif_start_queue(dev);
10011 mutex_unlock(&priv->mutex);
10012 return 0;
10013 }
10014
10015 static int ipw_net_stop(struct net_device *dev)
10016 {
10017 IPW_DEBUG_INFO("dev->close\n");
10018 netif_stop_queue(dev);
10019 return 0;
10020 }
10021
10022 /*
10023 TODO:
10024
10025 Modify to send one TFD per fragment instead of using chunking; otherwise
10026 we would need to heavily modify ieee80211_skb_to_txb().
10027 */
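/*
 * Until then, ipw_tx_skb() maps up to NUM_TFD_CHUNKS - 2 fragments directly
 * into one TFD; if more fragments remain, they are copied into a single
 * freshly allocated skb that becomes the last chunk, so the whole frame still
 * fits in one transmit descriptor.
 */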
10028
10029 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10030 int pri)
10031 {
10032 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10033 txb->fragments[0]->data;
10034 int i = 0;
10035 struct tfd_frame *tfd;
10036 #ifdef CONFIG_IPW2200_QOS
10037 int tx_id = ipw_get_tx_queue_number(priv, pri);
10038 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10039 #else
10040 struct clx2_tx_queue *txq = &priv->txq[0];
10041 #endif
10042 struct clx2_queue *q = &txq->q;
10043 u8 id, hdr_len, unicast;
10044 u16 remaining_bytes;
10045 int fc;
10046
10047 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10048 switch (priv->ieee->iw_mode) {
10049 case IW_MODE_ADHOC:
10050 unicast = !is_multicast_ether_addr(hdr->addr1);
10051 id = ipw_find_station(priv, hdr->addr1);
10052 if (id == IPW_INVALID_STATION) {
10053 id = ipw_add_station(priv, hdr->addr1);
10054 if (id == IPW_INVALID_STATION) {
10055 IPW_WARNING("Attempt to send data to "
10056 "invalid cell: " MAC_FMT "\n",
10057 MAC_ARG(hdr->addr1));
10058 goto drop;
10059 }
10060 }
10061 break;
10062
10063 case IW_MODE_INFRA:
10064 default:
10065 unicast = !is_multicast_ether_addr(hdr->addr3);
10066 id = 0;
10067 break;
10068 }
10069
10070 tfd = &txq->bd[q->first_empty];
10071 txq->txb[q->first_empty] = txb;
10072 memset(tfd, 0, sizeof(*tfd));
10073 tfd->u.data.station_number = id;
10074
10075 tfd->control_flags.message_type = TX_FRAME_TYPE;
10076 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10077
10078 tfd->u.data.cmd_id = DINO_CMD_TX;
10079 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10080 remaining_bytes = txb->payload_size;
10081
10082 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10083 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10084 else
10085 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10086
10087 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10088 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10089
10090 fc = le16_to_cpu(hdr->frame_ctl);
10091 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10092
10093 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10094
10095 if (likely(unicast))
10096 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10097
10098 if (txb->encrypted && !priv->ieee->host_encrypt) {
10099 switch (priv->ieee->sec.level) {
10100 case SEC_LEVEL_3:
10101 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10102 IEEE80211_FCTL_PROTECTED;
10103 /* XXX: ACK flag must be set for CCMP even if it
10104 * is a multicast/broadcast packet, because CCMP
10105 * group communication encrypted by GTK is
10106 * actually done by the AP. */
10107 if (!unicast)
10108 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10109
10110 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10111 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10112 tfd->u.data.key_index = 0;
10113 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10114 break;
10115 case SEC_LEVEL_2:
10116 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10117 IEEE80211_FCTL_PROTECTED;
10118 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10119 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10120 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10121 break;
10122 case SEC_LEVEL_1:
10123 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10124 IEEE80211_FCTL_PROTECTED;
10125 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10126 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10127 40)
10128 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10129 else
10130 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10131 break;
10132 case SEC_LEVEL_0:
10133 break;
10134 default:
10135 			printk(KERN_ERR "Unknown security level %d\n",
10136 priv->ieee->sec.level);
10137 break;
10138 }
10139 } else
10140 /* No hardware encryption */
10141 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10142
10143 #ifdef CONFIG_IPW2200_QOS
10144 if (fc & IEEE80211_STYPE_QOS_DATA)
10145 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10146 #endif /* CONFIG_IPW2200_QOS */
10147
10148 /* payload */
10149 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10150 txb->nr_frags));
10151 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10152 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10153 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10154 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10155 i, le32_to_cpu(tfd->u.data.num_chunks),
10156 txb->fragments[i]->len - hdr_len);
10157 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10158 i, tfd->u.data.num_chunks,
10159 txb->fragments[i]->len - hdr_len);
10160 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10161 txb->fragments[i]->len - hdr_len);
10162
10163 tfd->u.data.chunk_ptr[i] =
10164 cpu_to_le32(pci_map_single
10165 (priv->pci_dev,
10166 txb->fragments[i]->data + hdr_len,
10167 txb->fragments[i]->len - hdr_len,
10168 PCI_DMA_TODEVICE));
10169 tfd->u.data.chunk_len[i] =
10170 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10171 }
10172
10173 if (i != txb->nr_frags) {
10174 struct sk_buff *skb;
10175 u16 remaining_bytes = 0;
10176 int j;
10177
10178 for (j = i; j < txb->nr_frags; j++)
10179 remaining_bytes += txb->fragments[j]->len - hdr_len;
10180
10181 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10182 remaining_bytes);
10183 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10184 if (skb != NULL) {
10185 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10186 for (j = i; j < txb->nr_frags; j++) {
10187 int size = txb->fragments[j]->len - hdr_len;
10188
10189 printk(KERN_INFO "Adding frag %d %d...\n",
10190 j, size);
10191 memcpy(skb_put(skb, size),
10192 txb->fragments[j]->data + hdr_len, size);
10193 }
10194 dev_kfree_skb_any(txb->fragments[i]);
10195 txb->fragments[i] = skb;
10196 tfd->u.data.chunk_ptr[i] =
10197 cpu_to_le32(pci_map_single
10198 (priv->pci_dev, skb->data,
10199 tfd->u.data.chunk_len[i],
10200 PCI_DMA_TODEVICE));
10201
10202 tfd->u.data.num_chunks =
10203 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10204 1);
10205 }
10206 }
10207
10208 /* kick DMA */
10209 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10210 ipw_write32(priv, q->reg_w, q->first_empty);
10211
10212 if (ipw_queue_space(q) < q->high_mark)
10213 netif_stop_queue(priv->net_dev);
10214
10215 return NETDEV_TX_OK;
10216
10217 drop:
10218 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10219 ieee80211_txb_free(txb);
10220 return NETDEV_TX_OK;
10221 }
10222
10223 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10224 {
10225 struct ipw_priv *priv = ieee80211_priv(dev);
10226 #ifdef CONFIG_IPW2200_QOS
10227 int tx_id = ipw_get_tx_queue_number(priv, pri);
10228 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10229 #else
10230 struct clx2_tx_queue *txq = &priv->txq[0];
10231 #endif /* CONFIG_IPW2200_QOS */
10232
10233 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10234 return 1;
10235
10236 return 0;
10237 }
10238
10239 #ifdef CONFIG_IPW2200_PROMISCUOUS
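/*
 * When the rtap interface is up, every transmitted frame is also mirrored to
 * it: each fragment (or, with a *_HEADER_ONLY filter, just its 802.11 header)
 * is copied into a new skb behind a minimal radiotap header that carries only
 * the channel field, then handed to the promiscuous ieee80211 instance,
 * unless the prom_priv->filter settings exclude that frame type.
 */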
10240 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10241 struct ieee80211_txb *txb)
10242 {
10243 struct ieee80211_rx_stats dummystats;
10244 struct ieee80211_hdr *hdr;
10245 u8 n;
10246 u16 filter = priv->prom_priv->filter;
10247 int hdr_only = 0;
10248
10249 if (filter & IPW_PROM_NO_TX)
10250 return;
10251
10252 memset(&dummystats, 0, sizeof(dummystats));
10253
10254 	/* Filtering of fragment chains is done against the first fragment */
10255 hdr = (void *)txb->fragments[0]->data;
10256 if (ieee80211_is_management(hdr->frame_ctl)) {
10257 if (filter & IPW_PROM_NO_MGMT)
10258 return;
10259 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10260 hdr_only = 1;
10261 } else if (ieee80211_is_control(hdr->frame_ctl)) {
10262 if (filter & IPW_PROM_NO_CTL)
10263 return;
10264 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10265 hdr_only = 1;
10266 } else if (ieee80211_is_data(hdr->frame_ctl)) {
10267 if (filter & IPW_PROM_NO_DATA)
10268 return;
10269 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10270 hdr_only = 1;
10271 }
10272
10273 	for (n = 0; n < txb->nr_frags; ++n) {
10274 struct sk_buff *src = txb->fragments[n];
10275 struct sk_buff *dst;
10276 struct ieee80211_radiotap_header *rt_hdr;
10277 int len;
10278
10279 if (hdr_only) {
10280 hdr = (void *)src->data;
10281 len = ieee80211_get_hdrlen(hdr->frame_ctl);
10282 } else
10283 len = src->len;
10284
10285 dst = alloc_skb(
10286 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10287 if (!dst) continue;
10288
10289 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10290
10291 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10292 rt_hdr->it_pad = 0;
10293 rt_hdr->it_present = 0; /* after all, it's just an idea */
10294 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10295
10296 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10297 ieee80211chan2mhz(priv->channel));
10298 if (priv->channel > 14) /* 802.11a */
10299 *(u16*)skb_put(dst, sizeof(u16)) =
10300 cpu_to_le16(IEEE80211_CHAN_OFDM |
10301 IEEE80211_CHAN_5GHZ);
10302 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10303 *(u16*)skb_put(dst, sizeof(u16)) =
10304 cpu_to_le16(IEEE80211_CHAN_CCK |
10305 IEEE80211_CHAN_2GHZ);
10306 else /* 802.11g */
10307 *(u16*)skb_put(dst, sizeof(u16)) =
10308 cpu_to_le16(IEEE80211_CHAN_OFDM |
10309 IEEE80211_CHAN_2GHZ);
10310
10311 rt_hdr->it_len = dst->len;
10312
10313 memcpy(skb_put(dst, len), src->data, len);
10314
10315 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10316 dev_kfree_skb_any(dst);
10317 }
10318 }
10319 #endif
10320
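/*
 * Transmit entry point called by the ieee80211 layer.  Frames sent while not
 * associated are counted as carrier errors and the queue is stopped; when the
 * promiscuous rtap interface is running the frame is mirrored to it first,
 * and then ipw_tx_skb() queues it to the hardware under priv->lock.
 */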
10321 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10322 struct net_device *dev, int pri)
10323 {
10324 struct ipw_priv *priv = ieee80211_priv(dev);
10325 unsigned long flags;
10326 int ret;
10327
10328 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10329 spin_lock_irqsave(&priv->lock, flags);
10330
10331 if (!(priv->status & STATUS_ASSOCIATED)) {
10332 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10333 priv->ieee->stats.tx_carrier_errors++;
10334 netif_stop_queue(dev);
10335 goto fail_unlock;
10336 }
10337
10338 #ifdef CONFIG_IPW2200_PROMISCUOUS
10339 if (rtap_iface && netif_running(priv->prom_net_dev))
10340 ipw_handle_promiscuous_tx(priv, txb);
10341 #endif
10342
10343 ret = ipw_tx_skb(priv, txb, pri);
10344 if (ret == NETDEV_TX_OK)
10345 __ipw_led_activity_on(priv);
10346 spin_unlock_irqrestore(&priv->lock, flags);
10347
10348 return ret;
10349
10350 fail_unlock:
10351 spin_unlock_irqrestore(&priv->lock, flags);
10352 return 1;
10353 }
10354
10355 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10356 {
10357 struct ipw_priv *priv = ieee80211_priv(dev);
10358
10359 priv->ieee->stats.tx_packets = priv->tx_packets;
10360 priv->ieee->stats.rx_packets = priv->rx_packets;
10361 return &priv->ieee->stats;
10362 }
10363
10364 static void ipw_net_set_multicast_list(struct net_device *dev)
10365 {
10366
10367 }
10368
10369 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10370 {
10371 struct ipw_priv *priv = ieee80211_priv(dev);
10372 struct sockaddr *addr = p;
10373 if (!is_valid_ether_addr(addr->sa_data))
10374 return -EADDRNOTAVAIL;
10375 mutex_lock(&priv->mutex);
10376 priv->config |= CFG_CUSTOM_MAC;
10377 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10378 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
10379 priv->net_dev->name, MAC_ARG(priv->mac_addr));
10380 queue_work(priv->workqueue, &priv->adapter_restart);
10381 mutex_unlock(&priv->mutex);
10382 return 0;
10383 }
10384
10385 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10386 struct ethtool_drvinfo *info)
10387 {
10388 struct ipw_priv *p = ieee80211_priv(dev);
10389 char vers[64];
10390 char date[32];
10391 u32 len;
10392
10393 strcpy(info->driver, DRV_NAME);
10394 strcpy(info->version, DRV_VERSION);
10395
10396 len = sizeof(vers);
10397 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10398 len = sizeof(date);
10399 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10400
10401 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10402 vers, date);
10403 strcpy(info->bus_info, pci_name(p->pci_dev));
10404 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10405 }
10406
10407 static u32 ipw_ethtool_get_link(struct net_device *dev)
10408 {
10409 struct ipw_priv *priv = ieee80211_priv(dev);
10410 return (priv->status & STATUS_ASSOCIATED) != 0;
10411 }
10412
10413 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10414 {
10415 return IPW_EEPROM_IMAGE_SIZE;
10416 }
10417
10418 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10419 struct ethtool_eeprom *eeprom, u8 * bytes)
10420 {
10421 struct ipw_priv *p = ieee80211_priv(dev);
10422
10423 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10424 return -EINVAL;
10425 mutex_lock(&p->mutex);
10426 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10427 mutex_unlock(&p->mutex);
10428 return 0;
10429 }
10430
10431 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10432 struct ethtool_eeprom *eeprom, u8 * bytes)
10433 {
10434 struct ipw_priv *p = ieee80211_priv(dev);
10435 int i;
10436
10437 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10438 return -EINVAL;
10439 mutex_lock(&p->mutex);
10440 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10441 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10442 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10443 mutex_unlock(&p->mutex);
10444 return 0;
10445 }
10446
10447 static struct ethtool_ops ipw_ethtool_ops = {
10448 .get_link = ipw_ethtool_get_link,
10449 .get_drvinfo = ipw_ethtool_get_drvinfo,
10450 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10451 .get_eeprom = ipw_ethtool_get_eeprom,
10452 .set_eeprom = ipw_ethtool_set_eeprom,
10453 };
10454
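/*
 * Hard interrupt handler.  It only checks whether the interrupt is really
 * ours (shared line, interrupts disabled, or a vanished card reading back
 * 0xFFFFFFFF), acknowledges and masks the INTA bits, caches them in
 * priv->isr_inta and defers all real processing to the IRQ tasklet.
 */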
10455 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
10456 {
10457 struct ipw_priv *priv = data;
10458 u32 inta, inta_mask;
10459
10460 if (!priv)
10461 return IRQ_NONE;
10462
10463 spin_lock(&priv->lock);
10464
10465 if (!(priv->status & STATUS_INT_ENABLED)) {
10466 /* Shared IRQ */
10467 goto none;
10468 }
10469
10470 inta = ipw_read32(priv, IPW_INTA_RW);
10471 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10472
10473 if (inta == 0xFFFFFFFF) {
10474 /* Hardware disappeared */
10475 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10476 goto none;
10477 }
10478
10479 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10480 /* Shared interrupt */
10481 goto none;
10482 }
10483
10484 /* tell the device to stop sending interrupts */
10485 ipw_disable_interrupts(priv);
10486
10487 /* ack current interrupts */
10488 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10489 ipw_write32(priv, IPW_INTA_RW, inta);
10490
10491 /* Cache INTA value for our tasklet */
10492 priv->isr_inta = inta;
10493
10494 tasklet_schedule(&priv->irq_tasklet);
10495
10496 spin_unlock(&priv->lock);
10497
10498 return IRQ_HANDLED;
10499 none:
10500 spin_unlock(&priv->lock);
10501 return IRQ_NONE;
10502 }
10503
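/*
 * RF-kill poll: while the hardware kill switch is still active this work item
 * reschedules itself every 2*HZ; once the switch is released and no software
 * kill is set, it queues an adapter restart (which cannot be done directly
 * from here while holding the IRQ spinlock).
 */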
10504 static void ipw_rf_kill(void *adapter)
10505 {
10506 struct ipw_priv *priv = adapter;
10507 unsigned long flags;
10508
10509 spin_lock_irqsave(&priv->lock, flags);
10510
10511 if (rf_kill_active(priv)) {
10512 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10513 if (priv->workqueue)
10514 queue_delayed_work(priv->workqueue,
10515 &priv->rf_kill, 2 * HZ);
10516 goto exit_unlock;
10517 }
10518
10519 /* RF Kill is now disabled, so bring the device back up */
10520
10521 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10522 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10523 "device\n");
10524
10525 		/* we cannot do an adapter restart while inside an irq lock */
10526 queue_work(priv->workqueue, &priv->adapter_restart);
10527 } else
10528 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10529 "enabled\n");
10530
10531 exit_unlock:
10532 spin_unlock_irqrestore(&priv->lock, flags);
10533 }
10534
10535 static void ipw_bg_rf_kill(void *data)
10536 {
10537 struct ipw_priv *priv = data;
10538 mutex_lock(&priv->mutex);
10539 ipw_rf_kill(data);
10540 mutex_unlock(&priv->mutex);
10541 }
10542
10543 static void ipw_link_up(struct ipw_priv *priv)
10544 {
10545 priv->last_seq_num = -1;
10546 priv->last_frag_num = -1;
10547 priv->last_packet_time = 0;
10548
10549 netif_carrier_on(priv->net_dev);
10550 if (netif_queue_stopped(priv->net_dev)) {
10551 IPW_DEBUG_NOTIF("waking queue\n");
10552 netif_wake_queue(priv->net_dev);
10553 } else {
10554 IPW_DEBUG_NOTIF("starting queue\n");
10555 netif_start_queue(priv->net_dev);
10556 }
10557
10558 cancel_delayed_work(&priv->request_scan);
10559 ipw_reset_stats(priv);
10560 /* Ensure the rate is updated immediately */
10561 priv->last_rate = ipw_get_current_rate(priv);
10562 ipw_gather_stats(priv);
10563 ipw_led_link_up(priv);
10564 notify_wx_assoc_event(priv);
10565
10566 if (priv->config & CFG_BACKGROUND_SCAN)
10567 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10568 }
10569
10570 static void ipw_bg_link_up(void *data)
10571 {
10572 struct ipw_priv *priv = data;
10573 mutex_lock(&priv->mutex);
10574 ipw_link_up(data);
10575 mutex_unlock(&priv->mutex);
10576 }
10577
10578 static void ipw_link_down(struct ipw_priv *priv)
10579 {
10580 ipw_led_link_down(priv);
10581 netif_carrier_off(priv->net_dev);
10582 netif_stop_queue(priv->net_dev);
10583 notify_wx_assoc_event(priv);
10584
10585 /* Cancel any queued work ... */
10586 cancel_delayed_work(&priv->request_scan);
10587 cancel_delayed_work(&priv->adhoc_check);
10588 cancel_delayed_work(&priv->gather_stats);
10589
10590 ipw_reset_stats(priv);
10591
10592 if (!(priv->status & STATUS_EXIT_PENDING)) {
10593 /* Queue up another scan... */
10594 queue_work(priv->workqueue, &priv->request_scan);
10595 }
10596 }
10597
10598 static void ipw_bg_link_down(void *data)
10599 {
10600 struct ipw_priv *priv = data;
10601 mutex_lock(&priv->mutex);
10602 ipw_link_down(data);
10603 mutex_unlock(&priv->mutex);
10604 }
10605
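/*
 * Creates the driver workqueue and wires up every deferred work item (using
 * the old three-argument INIT_WORK style that passes priv as the data
 * pointer), the wait queues used for command completion, and the IRQ tasklet.
 */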
10606 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10607 {
10608 int ret = 0;
10609
10610 priv->workqueue = create_workqueue(DRV_NAME);
10611 init_waitqueue_head(&priv->wait_command_queue);
10612 init_waitqueue_head(&priv->wait_state);
10613
10614 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10615 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10616 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10617 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10618 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10619 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10620 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10621 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10622 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10623 INIT_WORK(&priv->request_scan,
10624 (void (*)(void *))ipw_request_scan, priv);
10625 INIT_WORK(&priv->gather_stats,
10626 (void (*)(void *))ipw_bg_gather_stats, priv);
10627 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10628 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10629 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10630 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10631 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10632 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10633 priv);
10634 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10635 priv);
10636 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10637 priv);
10638 INIT_WORK(&priv->merge_networks,
10639 (void (*)(void *))ipw_merge_adhoc_network, priv);
10640
10641 #ifdef CONFIG_IPW2200_QOS
10642 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10643 priv);
10644 #endif /* CONFIG_IPW2200_QOS */
10645
10646 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10647 ipw_irq_tasklet, (unsigned long)priv);
10648
10649 return ret;
10650 }
10651
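/*
 * Shim used to push ieee80211 security settings into the driver: it copies
 * keys, the active key index, auth mode, privacy and encryption level into
 * priv->ieee->sec, updates CAP_SHARED_KEY / CAP_PRIVACY_ON accordingly, flags
 * STATUS_SECURITY_UPDATED, and loads the keys into hardware when host
 * encryption is not in use.
 */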
10652 static void shim__set_security(struct net_device *dev,
10653 struct ieee80211_security *sec)
10654 {
10655 struct ipw_priv *priv = ieee80211_priv(dev);
10656 int i;
10657 for (i = 0; i < 4; i++) {
10658 if (sec->flags & (1 << i)) {
10659 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10660 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10661 if (sec->key_sizes[i] == 0)
10662 priv->ieee->sec.flags &= ~(1 << i);
10663 else {
10664 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10665 sec->key_sizes[i]);
10666 priv->ieee->sec.flags |= (1 << i);
10667 }
10668 priv->status |= STATUS_SECURITY_UPDATED;
10669 } else if (sec->level != SEC_LEVEL_1)
10670 priv->ieee->sec.flags &= ~(1 << i);
10671 }
10672
10673 if (sec->flags & SEC_ACTIVE_KEY) {
10674 if (sec->active_key <= 3) {
10675 priv->ieee->sec.active_key = sec->active_key;
10676 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10677 } else
10678 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10679 priv->status |= STATUS_SECURITY_UPDATED;
10680 } else
10681 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10682
10683 if ((sec->flags & SEC_AUTH_MODE) &&
10684 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10685 priv->ieee->sec.auth_mode = sec->auth_mode;
10686 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10687 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10688 priv->capability |= CAP_SHARED_KEY;
10689 else
10690 priv->capability &= ~CAP_SHARED_KEY;
10691 priv->status |= STATUS_SECURITY_UPDATED;
10692 }
10693
10694 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10695 priv->ieee->sec.flags |= SEC_ENABLED;
10696 priv->ieee->sec.enabled = sec->enabled;
10697 priv->status |= STATUS_SECURITY_UPDATED;
10698 if (sec->enabled)
10699 priv->capability |= CAP_PRIVACY_ON;
10700 else
10701 priv->capability &= ~CAP_PRIVACY_ON;
10702 }
10703
10704 if (sec->flags & SEC_ENCRYPT)
10705 priv->ieee->sec.encrypt = sec->encrypt;
10706
10707 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10708 priv->ieee->sec.level = sec->level;
10709 priv->ieee->sec.flags |= SEC_LEVEL;
10710 priv->status |= STATUS_SECURITY_UPDATED;
10711 }
10712
10713 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10714 ipw_set_hwcrypto_keys(priv);
10715
10716 	/* To match current functionality of ipw2100 (which works well w/
10717 	 * various supplicants), we don't force a disassociation if the
10718 	 * privacy capability changes ... */
10719 #if 0
10720 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10721 (((priv->assoc_request.capability &
10722 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10723 (!(priv->assoc_request.capability &
10724 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10725 IPW_DEBUG_ASSOC("Disassociating due to capability "
10726 "change.\n");
10727 ipw_disassociate(priv);
10728 }
10729 #endif
10730 }
10731
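/*
 * Builds the supported-rates command from the configured band: a pure 5.2GHz
 * setup (A mode) gets the OFDM default rate set, while everything else is
 * treated as 2.4GHz/mixed (G mode) with CCK rates plus OFDM rates when OFDM
 * modulation is enabled.
 */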
10732 static int init_supported_rates(struct ipw_priv *priv,
10733 struct ipw_supported_rates *rates)
10734 {
10735 /* TODO: Mask out rates based on priv->rates_mask */
10736
10737 memset(rates, 0, sizeof(*rates));
10738 /* configure supported rates */
10739 switch (priv->ieee->freq_band) {
10740 case IEEE80211_52GHZ_BAND:
10741 rates->ieee_mode = IPW_A_MODE;
10742 rates->purpose = IPW_RATE_CAPABILITIES;
10743 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10744 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10745 break;
10746
10747 default: /* Mixed or 2.4Ghz */
10748 rates->ieee_mode = IPW_G_MODE;
10749 rates->purpose = IPW_RATE_CAPABILITIES;
10750 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10751 IEEE80211_CCK_DEFAULT_RATES_MASK);
10752 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10753 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10754 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10755 }
10756 break;
10757 }
10758
10759 return 0;
10760 }
10761
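/*
 * Post-firmware-load configuration sequence: tx power, adapter MAC address,
 * system config (including BT coexistence bits read from the EEPROM SKU
 * capabilities and promiscuous-mode overrides), supported rates, optional RTS
 * threshold, QoS parameters, random seed and finally the host-complete
 * command that moves the firmware into the RUN state.
 */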
10762 static int ipw_config(struct ipw_priv *priv)
10763 {
10764 	/* This is only called from ipw_up, which resets/reloads the firmware,
10765 	   so we don't need to disable the card before we configure it */
10767 if (ipw_set_tx_power(priv))
10768 goto error;
10769
10770 /* initialize adapter address */
10771 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10772 goto error;
10773
10774 /* set basic system config settings */
10775 init_sys_config(&priv->sys_config);
10776
10777 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10778 * Does not support BT priority yet (don't abort or defer our Tx) */
10779 if (bt_coexist) {
10780 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10781
10782 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10783 priv->sys_config.bt_coexistence
10784 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10785 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10786 priv->sys_config.bt_coexistence
10787 |= CFG_BT_COEXISTENCE_OOB;
10788 }
10789
10790 #ifdef CONFIG_IPW2200_PROMISCUOUS
10791 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10792 priv->sys_config.accept_all_data_frames = 1;
10793 priv->sys_config.accept_non_directed_frames = 1;
10794 priv->sys_config.accept_all_mgmt_bcpr = 1;
10795 priv->sys_config.accept_all_mgmt_frames = 1;
10796 }
10797 #endif
10798
10799 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10800 priv->sys_config.answer_broadcast_ssid_probe = 1;
10801 else
10802 priv->sys_config.answer_broadcast_ssid_probe = 0;
10803
10804 if (ipw_send_system_config(priv))
10805 goto error;
10806
10807 init_supported_rates(priv, &priv->rates);
10808 if (ipw_send_supported_rates(priv, &priv->rates))
10809 goto error;
10810
10811 /* Set request-to-send threshold */
10812 if (priv->rts_threshold) {
10813 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10814 goto error;
10815 }
10816 #ifdef CONFIG_IPW2200_QOS
10817 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10818 ipw_qos_activate(priv, NULL);
10819 #endif /* CONFIG_IPW2200_QOS */
10820
10821 if (ipw_set_random_seed(priv))
10822 goto error;
10823
10824 /* final state transition to the RUN state */
10825 if (ipw_send_host_complete(priv))
10826 goto error;
10827
10828 priv->status |= STATUS_INIT;
10829
10830 ipw_led_init(priv);
10831 ipw_led_radio_on(priv);
10832 priv->notif_missed_beacons = 0;
10833
10834 /* Set hardware WEP key if it is configured. */
10835 if ((priv->capability & CAP_PRIVACY_ON) &&
10836 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10837 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10838 ipw_set_hwcrypto_keys(priv);
10839
10840 return 0;
10841
10842 error:
10843 return -EIO;
10844 }
10845
10846 /*
10847 * NOTE:
10848 *
10849 * These tables have been tested in conjunction with the
10850 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10851 *
10852  * Altering these values, using them on other hardware, or using them in
10853  * geographies not intended for resale of the above-mentioned Intel adapters
10854  * has not been tested.
10855 *
10856 * Remember to update the table in README.ipw2200 when changing this
10857 * table.
10858 *
10859 */
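/*
 * Each entry below maps the 3-character country/SKU code stored in the EEPROM
 * to the 2.4GHz (bg) and 5GHz (a) channels the adapter may use there;
 * channels marked IEEE80211_CH_PASSIVE_ONLY are restricted to passive use.
 */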
10860 static const struct ieee80211_geo ipw_geos[] = {
10861 { /* Restricted */
10862 "---",
10863 .bg_channels = 11,
10864 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10865 {2427, 4}, {2432, 5}, {2437, 6},
10866 {2442, 7}, {2447, 8}, {2452, 9},
10867 {2457, 10}, {2462, 11}},
10868 },
10869
10870 { /* Custom US/Canada */
10871 "ZZF",
10872 .bg_channels = 11,
10873 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10874 {2427, 4}, {2432, 5}, {2437, 6},
10875 {2442, 7}, {2447, 8}, {2452, 9},
10876 {2457, 10}, {2462, 11}},
10877 .a_channels = 8,
10878 .a = {{5180, 36},
10879 {5200, 40},
10880 {5220, 44},
10881 {5240, 48},
10882 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10883 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10884 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10885 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10886 },
10887
10888 { /* Rest of World */
10889 "ZZD",
10890 .bg_channels = 13,
10891 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10892 {2427, 4}, {2432, 5}, {2437, 6},
10893 {2442, 7}, {2447, 8}, {2452, 9},
10894 {2457, 10}, {2462, 11}, {2467, 12},
10895 {2472, 13}},
10896 },
10897
10898 { /* Custom USA & Europe & High */
10899 "ZZA",
10900 .bg_channels = 11,
10901 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10902 {2427, 4}, {2432, 5}, {2437, 6},
10903 {2442, 7}, {2447, 8}, {2452, 9},
10904 {2457, 10}, {2462, 11}},
10905 .a_channels = 13,
10906 .a = {{5180, 36},
10907 {5200, 40},
10908 {5220, 44},
10909 {5240, 48},
10910 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10911 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10912 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10913 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10914 {5745, 149},
10915 {5765, 153},
10916 {5785, 157},
10917 {5805, 161},
10918 {5825, 165}},
10919 },
10920
10921 { /* Custom NA & Europe */
10922 "ZZB",
10923 .bg_channels = 11,
10924 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10925 {2427, 4}, {2432, 5}, {2437, 6},
10926 {2442, 7}, {2447, 8}, {2452, 9},
10927 {2457, 10}, {2462, 11}},
10928 .a_channels = 13,
10929 .a = {{5180, 36},
10930 {5200, 40},
10931 {5220, 44},
10932 {5240, 48},
10933 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10934 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10935 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10936 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10937 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10938 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10939 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10940 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10941 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10942 },
10943
10944 { /* Custom Japan */
10945 "ZZC",
10946 .bg_channels = 11,
10947 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10948 {2427, 4}, {2432, 5}, {2437, 6},
10949 {2442, 7}, {2447, 8}, {2452, 9},
10950 {2457, 10}, {2462, 11}},
10951 .a_channels = 4,
10952 .a = {{5170, 34}, {5190, 38},
10953 {5210, 42}, {5230, 46}},
10954 },
10955
10956 { /* Custom */
10957 "ZZM",
10958 .bg_channels = 11,
10959 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10960 {2427, 4}, {2432, 5}, {2437, 6},
10961 {2442, 7}, {2447, 8}, {2452, 9},
10962 {2457, 10}, {2462, 11}},
10963 },
10964
10965 { /* Europe */
10966 "ZZE",
10967 .bg_channels = 13,
10968 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10969 {2427, 4}, {2432, 5}, {2437, 6},
10970 {2442, 7}, {2447, 8}, {2452, 9},
10971 {2457, 10}, {2462, 11}, {2467, 12},
10972 {2472, 13}},
10973 .a_channels = 19,
10974 .a = {{5180, 36},
10975 {5200, 40},
10976 {5220, 44},
10977 {5240, 48},
10978 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10979 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10980 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10981 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10982 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10983 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10984 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10985 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10986 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10987 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10988 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10989 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10990 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10991 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10992 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10993 },
10994
10995 { /* Custom Japan */
10996 "ZZJ",
10997 .bg_channels = 14,
10998 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10999 {2427, 4}, {2432, 5}, {2437, 6},
11000 {2442, 7}, {2447, 8}, {2452, 9},
11001 {2457, 10}, {2462, 11}, {2467, 12},
11002 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11003 .a_channels = 4,
11004 .a = {{5170, 34}, {5190, 38},
11005 {5210, 42}, {5230, 46}},
11006 },
11007
11008 { /* Rest of World */
11009 "ZZR",
11010 .bg_channels = 14,
11011 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11012 {2427, 4}, {2432, 5}, {2437, 6},
11013 {2442, 7}, {2447, 8}, {2452, 9},
11014 {2457, 10}, {2462, 11}, {2467, 12},
11015 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11016 IEEE80211_CH_PASSIVE_ONLY}},
11017 },
11018
11019 { /* High Band */
11020 "ZZH",
11021 .bg_channels = 13,
11022 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11023 {2427, 4}, {2432, 5}, {2437, 6},
11024 {2442, 7}, {2447, 8}, {2452, 9},
11025 {2457, 10}, {2462, 11},
11026 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11027 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11028 .a_channels = 4,
11029 .a = {{5745, 149}, {5765, 153},
11030 {5785, 157}, {5805, 161}},
11031 },
11032
11033 { /* Custom Europe */
11034 "ZZG",
11035 .bg_channels = 13,
11036 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11037 {2427, 4}, {2432, 5}, {2437, 6},
11038 {2442, 7}, {2447, 8}, {2452, 9},
11039 {2457, 10}, {2462, 11},
11040 {2467, 12}, {2472, 13}},
11041 .a_channels = 4,
11042 .a = {{5180, 36}, {5200, 40},
11043 {5220, 44}, {5240, 48}},
11044 },
11045
11046 { /* Europe */
11047 "ZZK",
11048 .bg_channels = 13,
11049 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11050 {2427, 4}, {2432, 5}, {2437, 6},
11051 {2442, 7}, {2447, 8}, {2452, 9},
11052 {2457, 10}, {2462, 11},
11053 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11054 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11055 .a_channels = 24,
11056 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11057 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11058 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11059 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11060 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11061 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11062 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11063 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11064 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11065 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11066 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11067 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11068 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11069 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11070 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11071 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11072 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11073 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11074 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11075 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11076 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11077 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11078 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11079 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11080 },
11081
11082 { /* Europe */
11083 "ZZL",
11084 .bg_channels = 11,
11085 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11086 {2427, 4}, {2432, 5}, {2437, 6},
11087 {2442, 7}, {2447, 8}, {2452, 9},
11088 {2457, 10}, {2462, 11}},
11089 .a_channels = 13,
11090 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11091 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11092 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11093 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11094 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11095 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11096 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11097 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11098 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11099 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11100 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11101 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11102 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11103 }
11104 };
11105
11106 #define MAX_HW_RESTARTS 5
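/*
 * Brings the device up: loads firmware/microcode/eeprom, picks the geography
 * matching the EEPROM country code (falling back to the restricted "---"
 * entry), honours software and hardware RF kill, and retries the whole
 * configuration up to MAX_HW_RESTARTS times before giving up with -EIO.
 */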
11107 static int ipw_up(struct ipw_priv *priv)
11108 {
11109 int rc, i, j;
11110
11111 if (priv->status & STATUS_EXIT_PENDING)
11112 return -EIO;
11113
11114 if (cmdlog && !priv->cmdlog) {
11115 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
11116 GFP_KERNEL);
11117 if (priv->cmdlog == NULL) {
11118 IPW_ERROR("Error allocating %d command log entries.\n",
11119 cmdlog);
11120 return -ENOMEM;
11121 } else {
11122 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
11123 priv->cmdlog_len = cmdlog;
11124 }
11125 }
11126
11127 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11128 /* Load the microcode, firmware, and eeprom.
11129 * Also start the clocks. */
11130 rc = ipw_load(priv);
11131 if (rc) {
11132 IPW_ERROR("Unable to load firmware: %d\n", rc);
11133 return rc;
11134 }
11135
11136 ipw_init_ordinals(priv);
11137 if (!(priv->config & CFG_CUSTOM_MAC))
11138 eeprom_parse_mac(priv, priv->mac_addr);
11139 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11140
11141 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11142 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11143 ipw_geos[j].name, 3))
11144 break;
11145 }
11146 if (j == ARRAY_SIZE(ipw_geos)) {
11147 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11148 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11149 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11150 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11151 j = 0;
11152 }
11153 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11154 IPW_WARNING("Could not set geography.");
11155 return 0;
11156 }
11157
11158 if (priv->status & STATUS_RF_KILL_SW) {
11159 IPW_WARNING("Radio disabled by module parameter.\n");
11160 return 0;
11161 } else if (rf_kill_active(priv)) {
11162 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11163 "Kill switch must be turned off for "
11164 "wireless networking to work.\n");
11165 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11166 2 * HZ);
11167 return 0;
11168 }
11169
11170 rc = ipw_config(priv);
11171 if (!rc) {
11172 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11173
11174 			/* If configured to try and auto-associate, kick
11175 			 * off a scan. */
11176 queue_work(priv->workqueue, &priv->request_scan);
11177
11178 return 0;
11179 }
11180
11181 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11182 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11183 i, MAX_HW_RESTARTS);
11184
11185 /* We had an error bringing up the hardware, so take it
11186 * all the way back down so we can try again */
11187 ipw_down(priv);
11188 }
11189
11190 	/* tried to restart and configure the device for as long as our
11191 	 * patience would allow */
11192 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11193
11194 return -EIO;
11195 }
11196
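/*
 * Workqueue wrapper around ipw_up(): takes priv->mutex so the bring-up
 * path is serialized with the rest of the driver.
 */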
11197 static void ipw_bg_up(void *data)
11198 {
11199 struct ipw_priv *priv = data;
11200 mutex_lock(&priv->mutex);
11201 	ipw_up(priv);
11202 mutex_unlock(&priv->mutex);
11203 }
11204
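/*
 * Orderly shutdown of an initialized device: abort any scan in progress,
 * disassociate, shut down the LEDs, poll briefly for the firmware to
 * settle, then send a card-disable command and clear STATUS_INIT.
 */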
11205 static void ipw_deinit(struct ipw_priv *priv)
11206 {
11207 int i;
11208
11209 if (priv->status & STATUS_SCANNING) {
11210 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11211 ipw_abort_scan(priv);
11212 }
11213
11214 if (priv->status & STATUS_ASSOCIATED) {
11215 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11216 ipw_disassociate(priv);
11217 }
11218
11219 ipw_led_shutdown(priv);
11220
11221 	/* Wait up to 10ms for the status to change to not scanning and not
11222 	 * associated (disassociation can take a while for a full 802.11
11223 	 * exchange) */
11224 for (i = 1000; i && (priv->status &
11225 (STATUS_DISASSOCIATING |
11226 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11227 udelay(10);
11228
11229 if (priv->status & (STATUS_DISASSOCIATING |
11230 STATUS_ASSOCIATED | STATUS_SCANNING))
11231 IPW_DEBUG_INFO("Still associated or scanning...\n");
11232 else
11233 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11234
11235 /* Attempt to disable the card */
11236 ipw_send_card_disable(priv, 0);
11237
11238 priv->status &= ~STATUS_INIT;
11239 }
11240
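/*
 * Take the device all the way down.  STATUS_EXIT_PENDING is set for the
 * duration (and left set if the module is actually unloading), interrupts
 * are disabled, the transmit queue is stopped, and the NIC and radio LED
 * are turned off.
 */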
11241 static void ipw_down(struct ipw_priv *priv)
11242 {
11243 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11244
11245 priv->status |= STATUS_EXIT_PENDING;
11246
11247 if (ipw_is_init(priv))
11248 ipw_deinit(priv);
11249
11250 /* Wipe out the EXIT_PENDING status bit if we are not actually
11251 * exiting the module */
11252 if (!exit_pending)
11253 priv->status &= ~STATUS_EXIT_PENDING;
11254
11255 /* tell the device to stop sending interrupts */
11256 ipw_disable_interrupts(priv);
11257
11258 /* Clear all bits but the RF Kill */
11259 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11260 netif_carrier_off(priv->net_dev);
11261 netif_stop_queue(priv->net_dev);
11262
11263 ipw_stop_nic(priv);
11264
11265 ipw_led_radio_off(priv);
11266 }
11267
11268 static void ipw_bg_down(void *data)
11269 {
11270 struct ipw_priv *priv = data;
11271 mutex_lock(&priv->mutex);
11272 	ipw_down(priv);
11273 mutex_unlock(&priv->mutex);
11274 }
11275
11276 /* Called by register_netdev() */
11277 static int ipw_net_init(struct net_device *dev)
11278 {
11279 struct ipw_priv *priv = ieee80211_priv(dev);
11280 mutex_lock(&priv->mutex);
11281
11282 if (ipw_up(priv)) {
11283 mutex_unlock(&priv->mutex);
11284 return -EIO;
11285 }
11286
11287 mutex_unlock(&priv->mutex);
11288 return 0;
11289 }
11290
11291 /* PCI driver stuff */
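/*
 * PCI vendor/device/subsystem IDs this driver binds to.  The /* BG */ and
 * /* ABG */ annotations mark 802.11bg-only versus a/b/g parts, and the
 * all-zero sentinel entry is required to terminate the table.
 */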
11292 static struct pci_device_id card_ids[] = {
11293 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11294 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11295 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11296 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11297 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11298 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11299 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11300 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11301 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11302 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11303 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11304 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11305 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11306 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11307 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11308 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11309 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11310 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11311 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11312 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11313 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11314 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11315
11316 /* required last entry */
11317 {0,}
11318 };
11319
11320 MODULE_DEVICE_TABLE(pci, card_ids);
11321
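/*
 * Per-device sysfs attributes; the group is created against the PCI
 * device's kobject in ipw_pci_probe() and removed again in
 * ipw_pci_remove().
 */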
11322 static struct attribute *ipw_sysfs_entries[] = {
11323 &dev_attr_rf_kill.attr,
11324 &dev_attr_direct_dword.attr,
11325 &dev_attr_indirect_byte.attr,
11326 &dev_attr_indirect_dword.attr,
11327 &dev_attr_mem_gpio_reg.attr,
11328 &dev_attr_command_event_reg.attr,
11329 &dev_attr_nic_type.attr,
11330 &dev_attr_status.attr,
11331 &dev_attr_cfg.attr,
11332 &dev_attr_error.attr,
11333 &dev_attr_event_log.attr,
11334 &dev_attr_cmd_log.attr,
11335 &dev_attr_eeprom_delay.attr,
11336 &dev_attr_ucode_version.attr,
11337 &dev_attr_rtc.attr,
11338 &dev_attr_scan_age.attr,
11339 &dev_attr_led.attr,
11340 &dev_attr_speed_scan.attr,
11341 &dev_attr_net_stats.attr,
11342 #ifdef CONFIG_IPW2200_PROMISCUOUS
11343 &dev_attr_rtap_iface.attr,
11344 &dev_attr_rtap_filter.attr,
11345 #endif
11346 NULL
11347 };
11348
11349 static struct attribute_group ipw_attribute_group = {
11350 .name = NULL, /* put in device directory */
11351 .attrs = ipw_sysfs_entries,
11352 };
11353
11354 #ifdef CONFIG_IPW2200_PROMISCUOUS
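/*
 * Promiscuous (rtap) support: a second, radiotap-typed network interface
 * is registered alongside the regular one.  Opening it (while not in
 * monitor mode) asks the firmware to pass up all data and management
 * frames; closing it restores the normal filters.  The interface is
 * receive-only, so its hard_start_xmit handler simply rejects
 * transmissions.
 */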
11355 static int ipw_prom_open(struct net_device *dev)
11356 {
11357 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11358 struct ipw_priv *priv = prom_priv->priv;
11359
11360 IPW_DEBUG_INFO("prom dev->open\n");
11361 netif_carrier_off(dev);
11362 netif_stop_queue(dev);
11363
11364 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11365 priv->sys_config.accept_all_data_frames = 1;
11366 priv->sys_config.accept_non_directed_frames = 1;
11367 priv->sys_config.accept_all_mgmt_bcpr = 1;
11368 priv->sys_config.accept_all_mgmt_frames = 1;
11369
11370 ipw_send_system_config(priv);
11371 }
11372
11373 return 0;
11374 }
11375
11376 static int ipw_prom_stop(struct net_device *dev)
11377 {
11378 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11379 struct ipw_priv *priv = prom_priv->priv;
11380
11381 IPW_DEBUG_INFO("prom dev->stop\n");
11382
11383 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11384 priv->sys_config.accept_all_data_frames = 0;
11385 priv->sys_config.accept_non_directed_frames = 0;
11386 priv->sys_config.accept_all_mgmt_bcpr = 0;
11387 priv->sys_config.accept_all_mgmt_frames = 0;
11388
11389 ipw_send_system_config(priv);
11390 }
11391
11392 return 0;
11393 }
11394
11395 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11396 {
11397 IPW_DEBUG_INFO("prom dev->xmit\n");
11398 netif_stop_queue(dev);
11399 return -EOPNOTSUPP;
11400 }
11401
11402 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11403 {
11404 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11405 return &prom_priv->ieee->stats;
11406 }
11407
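/*
 * Allocate a second ieee80211 device named "rtap%d", mark it as an
 * ARPHRD_IEEE80211_RADIOTAP monitor interface, link it back to the main
 * ipw_priv and register it.  ipw_prom_free() undoes all of this.
 */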
11408 static int ipw_prom_alloc(struct ipw_priv *priv)
11409 {
11410 int rc = 0;
11411
11412 if (priv->prom_net_dev)
11413 return -EPERM;
11414
11415 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11416 if (priv->prom_net_dev == NULL)
11417 return -ENOMEM;
11418
11419 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11420 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11421 priv->prom_priv->priv = priv;
11422
11423 strcpy(priv->prom_net_dev->name, "rtap%d");
11424
11425 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11426 priv->prom_net_dev->open = ipw_prom_open;
11427 priv->prom_net_dev->stop = ipw_prom_stop;
11428 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11429 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11430
11431 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11432
11433 rc = register_netdev(priv->prom_net_dev);
11434 if (rc) {
11435 free_ieee80211(priv->prom_net_dev);
11436 priv->prom_net_dev = NULL;
11437 return rc;
11438 }
11439
11440 return 0;
11441 }
11442
11443 static void ipw_prom_free(struct ipw_priv *priv)
11444 {
11445 if (!priv->prom_net_dev)
11446 return;
11447
11448 unregister_netdev(priv->prom_net_dev);
11449 free_ieee80211(priv->prom_net_dev);
11450
11451 priv->prom_net_dev = NULL;
11452 }
11453
11454 #endif
11455
11456
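/*
 * ipw_pci_probe - one-time setup for a newly discovered adapter
 *
 * Allocates the ieee80211/net device, enables the PCI device with a 32-bit
 * DMA mask, maps BAR 0, sets up the deferred-work infrastructure, requests
 * the (shared) interrupt, wires up the net_device and ieee80211 callbacks,
 * creates the sysfs attribute group and finally registers the network
 * device (plus the optional rtap interface).  Errors unwind in reverse
 * order through the out_* labels.
 */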
11457 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11458 {
11459 int err = 0;
11460 struct net_device *net_dev;
11461 void __iomem *base;
11462 u32 length, val;
11463 struct ipw_priv *priv;
11464 int i;
11465
11466 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11467 if (net_dev == NULL) {
11468 err = -ENOMEM;
11469 goto out;
11470 }
11471
11472 priv = ieee80211_priv(net_dev);
11473 priv->ieee = netdev_priv(net_dev);
11474
11475 priv->net_dev = net_dev;
11476 priv->pci_dev = pdev;
11477 #ifdef CONFIG_IPW2200_DEBUG
11478 ipw_debug_level = debug;
11479 #endif
11480 spin_lock_init(&priv->lock);
11481 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11482 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11483
11484 mutex_init(&priv->mutex);
11485 if (pci_enable_device(pdev)) {
11486 err = -ENODEV;
11487 goto out_free_ieee80211;
11488 }
11489
11490 pci_set_master(pdev);
11491
11492 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11493 if (!err)
11494 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11495 if (err) {
11496 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11497 goto out_pci_disable_device;
11498 }
11499
11500 pci_set_drvdata(pdev, priv);
11501
11502 err = pci_request_regions(pdev, DRV_NAME);
11503 if (err)
11504 goto out_pci_disable_device;
11505
11506 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11507 * PCI Tx retries from interfering with C3 CPU state */
11508 pci_read_config_dword(pdev, 0x40, &val);
11509 if ((val & 0x0000ff00) != 0)
11510 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11511
11512 length = pci_resource_len(pdev, 0);
11513 priv->hw_len = length;
11514
11515 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11516 if (!base) {
11517 err = -ENODEV;
11518 goto out_pci_release_regions;
11519 }
11520
11521 priv->hw_base = base;
11522 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11523 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11524
11525 err = ipw_setup_deferred_work(priv);
11526 if (err) {
11527 IPW_ERROR("Unable to setup deferred work\n");
11528 goto out_iounmap;
11529 }
11530
11531 ipw_sw_reset(priv, 1);
11532
11533 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
11534 if (err) {
11535 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11536 goto out_destroy_workqueue;
11537 }
11538
11539 SET_MODULE_OWNER(net_dev);
11540 SET_NETDEV_DEV(net_dev, &pdev->dev);
11541
11542 mutex_lock(&priv->mutex);
11543
11544 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11545 priv->ieee->set_security = shim__set_security;
11546 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11547
11548 #ifdef CONFIG_IPW2200_QOS
11549 priv->ieee->is_qos_active = ipw_is_qos_active;
11550 priv->ieee->handle_probe_response = ipw_handle_beacon;
11551 priv->ieee->handle_beacon = ipw_handle_probe_response;
11552 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11553 #endif /* CONFIG_IPW2200_QOS */
11554
11555 priv->ieee->perfect_rssi = -20;
11556 priv->ieee->worst_rssi = -85;
11557
11558 net_dev->open = ipw_net_open;
11559 net_dev->stop = ipw_net_stop;
11560 net_dev->init = ipw_net_init;
11561 net_dev->get_stats = ipw_net_get_stats;
11562 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11563 net_dev->set_mac_address = ipw_net_set_mac_address;
11564 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11565 net_dev->wireless_data = &priv->wireless_data;
11566 net_dev->wireless_handlers = &ipw_wx_handler_def;
11567 net_dev->ethtool_ops = &ipw_ethtool_ops;
11568 net_dev->irq = pdev->irq;
11569 net_dev->base_addr = (unsigned long)priv->hw_base;
11570 net_dev->mem_start = pci_resource_start(pdev, 0);
11571 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11572
11573 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11574 if (err) {
11575 IPW_ERROR("failed to create sysfs device attributes\n");
11576 mutex_unlock(&priv->mutex);
11577 goto out_release_irq;
11578 }
11579
11580 mutex_unlock(&priv->mutex);
11581 err = register_netdev(net_dev);
11582 if (err) {
11583 IPW_ERROR("failed to register network device\n");
11584 goto out_remove_sysfs;
11585 }
11586
11587 #ifdef CONFIG_IPW2200_PROMISCUOUS
11588 if (rtap_iface) {
11589 err = ipw_prom_alloc(priv);
11590 if (err) {
11591 IPW_ERROR("Failed to register promiscuous network "
11592 "device (error %d).\n", err);
11593 unregister_netdev(priv->net_dev);
11594 goto out_remove_sysfs;
11595 }
11596 }
11597 #endif
11598
11599 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11600 "channels, %d 802.11a channels)\n",
11601 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11602 priv->ieee->geo.a_channels);
11603
11604 return 0;
11605
11606 out_remove_sysfs:
11607 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11608 out_release_irq:
11609 free_irq(pdev->irq, priv);
11610 out_destroy_workqueue:
11611 destroy_workqueue(priv->workqueue);
11612 priv->workqueue = NULL;
11613 out_iounmap:
11614 iounmap(priv->hw_base);
11615 out_pci_release_regions:
11616 pci_release_regions(pdev);
11617 out_pci_disable_device:
11618 pci_disable_device(pdev);
11619 pci_set_drvdata(pdev, NULL);
11620 out_free_ieee80211:
11621 free_ieee80211(priv->net_dev);
11622 out:
11623 return err;
11624 }
11625
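/*
 * ipw_pci_remove - device teardown, the mirror image of ipw_pci_probe()
 *
 * Marks the driver as exiting, takes the hardware down, removes the sysfs
 * group, unregisters the net device(s), frees the rx/tx queues, the
 * command log, the workqueue, the IBSS MAC hash and any captured error
 * log, then releases the IRQ, the mapping, the PCI resources and the
 * firmware images.
 */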
11626 static void ipw_pci_remove(struct pci_dev *pdev)
11627 {
11628 struct ipw_priv *priv = pci_get_drvdata(pdev);
11629 struct list_head *p, *q;
11630 int i;
11631
11632 if (!priv)
11633 return;
11634
11635 mutex_lock(&priv->mutex);
11636
11637 priv->status |= STATUS_EXIT_PENDING;
11638 ipw_down(priv);
11639 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11640
11641 mutex_unlock(&priv->mutex);
11642
11643 unregister_netdev(priv->net_dev);
11644
11645 if (priv->rxq) {
11646 ipw_rx_queue_free(priv, priv->rxq);
11647 priv->rxq = NULL;
11648 }
11649 ipw_tx_queue_free(priv);
11650
11651 if (priv->cmdlog) {
11652 kfree(priv->cmdlog);
11653 priv->cmdlog = NULL;
11654 }
11655 	/* ipw_down will ensure that there is no more pending work
11656 	 * in the workqueue, so we can safely cancel the work items now. */
11657 cancel_delayed_work(&priv->adhoc_check);
11658 cancel_delayed_work(&priv->gather_stats);
11659 cancel_delayed_work(&priv->request_scan);
11660 cancel_delayed_work(&priv->rf_kill);
11661 cancel_delayed_work(&priv->scan_check);
11662 destroy_workqueue(priv->workqueue);
11663 priv->workqueue = NULL;
11664
11665 /* Free MAC hash list for ADHOC */
11666 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11667 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11668 list_del(p);
11669 kfree(list_entry(p, struct ipw_ibss_seq, list));
11670 }
11671 }
11672
11673 if (priv->error) {
11674 ipw_free_error_log(priv->error);
11675 priv->error = NULL;
11676 }
11677
11678 #ifdef CONFIG_IPW2200_PROMISCUOUS
11679 ipw_prom_free(priv);
11680 #endif
11681
11682 free_irq(pdev->irq, priv);
11683 iounmap(priv->hw_base);
11684 pci_release_regions(pdev);
11685 pci_disable_device(pdev);
11686 pci_set_drvdata(pdev, NULL);
11687 free_ieee80211(priv->net_dev);
11688 free_firmware();
11689 }
11690
11691 #ifdef CONFIG_PM
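/*
 * Power management: suspend takes the interface down and powers off the
 * PCI device; resume restores PCI state, re-applies the RETRY_TIMEOUT
 * workaround (see below) and schedules the "up" work to bring the device
 * back online.
 */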
11692 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11693 {
11694 struct ipw_priv *priv = pci_get_drvdata(pdev);
11695 struct net_device *dev = priv->net_dev;
11696
11697 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11698
11699 /* Take down the device; powers it off, etc. */
11700 ipw_down(priv);
11701
11702 /* Remove the PRESENT state of the device */
11703 netif_device_detach(dev);
11704
11705 pci_save_state(pdev);
11706 pci_disable_device(pdev);
11707 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11708
11709 return 0;
11710 }
11711
11712 static int ipw_pci_resume(struct pci_dev *pdev)
11713 {
11714 struct ipw_priv *priv = pci_get_drvdata(pdev);
11715 struct net_device *dev = priv->net_dev;
11716 u32 val;
11717
11718 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11719
11720 pci_set_power_state(pdev, PCI_D0);
11721 pci_enable_device(pdev);
11722 pci_restore_state(pdev);
11723
11724 /*
11725 * Suspend/Resume resets the PCI configuration space, so we have to
11726 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11727 * from interfering with C3 CPU state. pci_restore_state won't help
11728 	 * here since it only restores the first 64 bytes of the PCI config header.
11729 */
11730 pci_read_config_dword(pdev, 0x40, &val);
11731 if ((val & 0x0000ff00) != 0)
11732 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11733
11734 	/* Set the device back into the PRESENT state; this will also wake
11735 	 * the queue if needed */
11736 netif_device_attach(dev);
11737
11738 /* Bring the device back up */
11739 queue_work(priv->workqueue, &priv->up);
11740
11741 return 0;
11742 }
11743 #endif
11744
11745 /* driver initialization stuff */
11746 static struct pci_driver ipw_driver = {
11747 .name = DRV_NAME,
11748 .id_table = card_ids,
11749 .probe = ipw_pci_probe,
11750 .remove = __devexit_p(ipw_pci_remove),
11751 #ifdef CONFIG_PM
11752 .suspend = ipw_pci_suspend,
11753 .resume = ipw_pci_resume,
11754 #endif
11755 };
11756
11757 static int __init ipw_init(void)
11758 {
11759 int ret;
11760
11761 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11762 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11763
11764 ret = pci_module_init(&ipw_driver);
11765 if (ret) {
11766 IPW_ERROR("Unable to initialize PCI module\n");
11767 return ret;
11768 }
11769
11770 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11771 if (ret) {
11772 IPW_ERROR("Unable to create driver sysfs file\n");
11773 pci_unregister_driver(&ipw_driver);
11774 return ret;
11775 }
11776
11777 return ret;
11778 }
11779
11780 static void __exit ipw_exit(void)
11781 {
11782 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11783 pci_unregister_driver(&ipw_driver);
11784 }
11785
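/*
 * Module parameters.  All of them use mode 0444, so they can be set on the
 * modprobe/insmod command line and inspected afterwards under
 * /sys/module/ipw2200/parameters/ (assuming the default module name), but
 * not changed at run time.
 *
 * Example (the values are only illustrative):
 *
 *	modprobe ipw2200 mode=0 channel=6 led=1 hwcrypto=0
 */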
11786 module_param(disable, int, 0444);
11787 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11788
11789 module_param(associate, int, 0444);
11790 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11791
11792 module_param(auto_create, int, 0444);
11793 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11794
11795 module_param(led, int, 0444);
11796 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11797
11798 #ifdef CONFIG_IPW2200_DEBUG
11799 module_param(debug, int, 0444);
11800 MODULE_PARM_DESC(debug, "debug output mask");
11801 #endif
11802
11803 module_param(channel, int, 0444);
11804 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
11805
11806 #ifdef CONFIG_IPW2200_PROMISCUOUS
11807 module_param(rtap_iface, int, 0444);
11808 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 = create, default 0 = do not create)");
11809 #endif
11810
11811 #ifdef CONFIG_IPW2200_QOS
11812 module_param(qos_enable, int, 0444);
11813 MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
11814
11815 module_param(qos_burst_enable, int, 0444);
11816 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11817
11818 module_param(qos_no_ack_mask, int, 0444);
11819 MODULE_PARM_DESC(qos_no_ack_mask, "bitmask of Tx queues to send with the no-ACK policy");
11820
11821 module_param(burst_duration_CCK, int, 0444);
11822 MODULE_PARM_DESC(burst_duration_CCK, "set the CCK burst duration");
11823
11824 module_param(burst_duration_OFDM, int, 0444);
11825 MODULE_PARM_DESC(burst_duration_OFDM, "set the OFDM burst duration");
11826 #endif /* CONFIG_IPW2200_QOS */
11827
11828 #ifdef CONFIG_IPW2200_MONITOR
11829 module_param(mode, int, 0444);
11830 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11831 #else
11832 module_param(mode, int, 0444);
11833 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11834 #endif
11835
11836 module_param(bt_coexist, int, 0444);
11837 MODULE_PARM_DESC(bt_coexist, "enable Bluetooth coexistence (default off)");
11838
11839 module_param(hwcrypto, int, 0444);
11840 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11841
11842 module_param(cmdlog, int, 0444);
11843 MODULE_PARM_DESC(cmdlog,
11844 		 "number of entries to allocate in a ring buffer for logging firmware commands (default 0 = off)");
11845
11846 module_param(roaming, int, 0444);
11847 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11848
11849 module_param(antenna, int, 0444);
11850 MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (use whichever has lower background noise), 3=Aux");
11851
11852 module_exit(ipw_exit);
11853 module_init(ipw_init);