1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
85 static int cmdlog = 0;
86 #ifdef CONFIG_IPW2200_DEBUG
87 static int debug = 0;
88 #endif
89 static int channel = 0;
90 static int mode = 0;
91
92 static u32 ipw_debug_level;
93 static int associate = 1;
94 static int auto_create = 1;
95 static int led = 0;
96 static int disable = 0;
97 static int bt_coexist = 0;
98 static int hwcrypto = 0;
99 static int roaming = 1;
100 static const char ipw_modes[] = {
101 'a', 'b', 'g', '?'
102 };
103 static int antenna = CFG_SYS_ANTENNA_BOTH;
104
105 #ifdef CONFIG_IPW2200_PROMISCUOUS
106 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
107 #endif
108
109
110 #ifdef CONFIG_IPW2200_QOS
111 static int qos_enable = 0;
112 static int qos_burst_enable = 0;
113 static int qos_no_ack_mask = 0;
114 static int burst_duration_CCK = 0;
115 static int burst_duration_OFDM = 0;
116
117 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
118 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
119 QOS_TX3_CW_MIN_OFDM},
120 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
121 QOS_TX3_CW_MAX_OFDM},
122 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
123 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
124 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
125 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
126 };
127
128 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
129 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
130 QOS_TX3_CW_MIN_CCK},
131 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
132 QOS_TX3_CW_MAX_CCK},
133 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
134 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
135 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
136 QOS_TX3_TXOP_LIMIT_CCK}
137 };
138
139 static struct ieee80211_qos_parameters def_parameters_OFDM = {
140 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
141 DEF_TX3_CW_MIN_OFDM},
142 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
143 DEF_TX3_CW_MAX_OFDM},
144 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
145 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
146 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
147 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
148 };
149
150 static struct ieee80211_qos_parameters def_parameters_CCK = {
151 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
152 DEF_TX3_CW_MIN_CCK},
153 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
154 DEF_TX3_CW_MAX_CCK},
155 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
156 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
157 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
158 DEF_TX3_TXOP_LIMIT_CCK}
159 };
160
161 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
162
163 static int from_priority_to_tx_queue[] = {
164 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
165 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
166 };
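
/*
 * Illustrative sketch (not part of the original driver): the table above
 * maps an 802.1d-style user priority (0..7) onto one of the four hardware
 * TX queues; e.g. priorities 6 and 7 select IPW_TX_QUEUE_4 while
 * priority 0 selects IPW_TX_QUEUE_1.  The helper name is hypothetical.
 */
#if 0
static int example_priority_to_queue(u16 priority)
{
	return from_priority_to_tx_queue[priority & 0x7];
}
#endif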
167
168 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
169
170 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
171 *qos_param);
172 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
173 *qos_param);
174 #endif /* CONFIG_IPW2200_QOS */
175
176 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
177 static void ipw_remove_current_network(struct ipw_priv *priv);
178 static void ipw_rx(struct ipw_priv *priv);
179 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
180 struct clx2_tx_queue *txq, int qindex);
181 static int ipw_queue_reset(struct ipw_priv *priv);
182
183 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
184 int len, int sync);
185
186 static void ipw_tx_queue_free(struct ipw_priv *);
187
188 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
189 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
190 static void ipw_rx_queue_replenish(void *);
191 static int ipw_up(struct ipw_priv *);
192 static void ipw_bg_up(void *);
193 static void ipw_down(struct ipw_priv *);
194 static void ipw_bg_down(void *);
195 static int ipw_config(struct ipw_priv *);
196 static int init_supported_rates(struct ipw_priv *priv,
197 struct ipw_supported_rates *prates);
198 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
199 static void ipw_send_wep_keys(struct ipw_priv *, int);
200
201 static int snprint_line(char *buf, size_t count,
202 const u8 * data, u32 len, u32 ofs)
203 {
204 int out, i, j, l;
205 char c;
206
207 out = snprintf(buf, count, "%08X", ofs);
208
209 for (l = 0, i = 0; i < 2; i++) {
210 out += snprintf(buf + out, count - out, " ");
211 for (j = 0; j < 8 && l < len; j++, l++)
212 out += snprintf(buf + out, count - out, "%02X ",
213 data[(i * 8 + j)]);
214 for (; j < 8; j++)
215 out += snprintf(buf + out, count - out, " ");
216 }
217
218 out += snprintf(buf + out, count - out, " ");
219 for (l = 0, i = 0; i < 2; i++) {
220 out += snprintf(buf + out, count - out, " ");
221 for (j = 0; j < 8 && l < len; j++, l++) {
222 c = data[(i * 8 + j)];
223 if (!isascii(c) || !isprint(c))
224 c = '.';
225
226 out += snprintf(buf + out, count - out, "%c", c);
227 }
228
229 for (; j < 8; j++)
230 out += snprintf(buf + out, count - out, " ");
231 }
232
233 return out;
234 }
235
236 static void printk_buf(int level, const u8 * data, u32 len)
237 {
238 char line[81];
239 u32 ofs = 0;
240 if (!(ipw_debug_level & level))
241 return;
242
243 while (len) {
244 snprint_line(line, sizeof(line), &data[ofs],
245 min(len, 16U), ofs);
246 printk(KERN_DEBUG "%s\n", line);
247 ofs += 16;
248 len -= min(len, 16U);
249 }
250 }
251
252 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
253 {
254 size_t out = size;
255 u32 ofs = 0;
256 int total = 0;
257
258 while (size && len) {
259 out = snprint_line(output, size, &data[ofs],
260 min_t(size_t, len, 16U), ofs);
261
262 ofs += 16;
263 output += out;
264 size -= out;
265 len -= min_t(size_t, len, 16U);
266 total += out;
267 }
268 return total;
269 }
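
/*
 * Illustrative sketch (not part of the original driver): typical use of
 * the hex-dump helpers above.  printk_buf() prints 16 bytes per line,
 * hex on the left and printable ASCII on the right, and is a no-op
 * unless the given level bit is set in ipw_debug_level (IPW_DL_IO is
 * assumed to come from ipw2200.h).
 */
#if 0
static void example_dump_buffer(const u8 *data, u32 len)
{
	/* Only emitted when the IO debug level is enabled. */
	printk_buf(IPW_DL_IO, data, len);
}
#endif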
270
271 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
272 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
273 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
274
275 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
276 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
277 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
278
279 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
280 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
281 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
282 {
283 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
284 __LINE__, (u32) (b), (u32) (c));
285 _ipw_write_reg8(a, b, c);
286 }
287
288 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
289 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
290 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
291 {
292 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
293 __LINE__, (u32) (b), (u32) (c));
294 _ipw_write_reg16(a, b, c);
295 }
296
297 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
298 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
299 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
300 {
301 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
302 __LINE__, (u32) (b), (u32) (c));
303 _ipw_write_reg32(a, b, c);
304 }
305
306 /* 8-bit direct write (low 4K) */
307 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
308
309 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
310 #define ipw_write8(ipw, ofs, val) \
311 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
312 _ipw_write8(ipw, ofs, val)
313
314 /* 16-bit direct write (low 4K) */
315 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
316
317 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
318 #define ipw_write16(ipw, ofs, val) \
319 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
320 _ipw_write16(ipw, ofs, val)
321
322 /* 32-bit direct write (low 4K) */
323 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
324
325 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
326 #define ipw_write32(ipw, ofs, val) \
327 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
328 _ipw_write32(ipw, ofs, val)
329
330 /* 8-bit direct read (low 4K) */
331 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
332
333 /* 8-bit direct read (low 4K), with debug wrapper */
334 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
335 {
336 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
337 return _ipw_read8(ipw, ofs);
338 }
339
340 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
341 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
342
343 /* 16-bit direct read (low 4K) */
344 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
345
346 /* 16-bit direct read (low 4K), with debug wrapper */
347 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
348 {
349 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
350 return _ipw_read16(ipw, ofs);
351 }
352
353 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
354 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
355
356 /* 32-bit direct read (low 4K) */
357 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
358
359 /* 32-bit direct read (low 4K), with debug wrapper */
360 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
361 {
362 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
363 return _ipw_read32(ipw, ofs);
364 }
365
366 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
367 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
368
369 /* multi-byte read (above 4K), with debug wrapper */
370 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
371 static inline void __ipw_read_indirect(const char *f, int l,
372 struct ipw_priv *a, u32 b, u8 * c, int d)
373 {
374 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
375 d);
376 _ipw_read_indirect(a, b, c, d);
377 }
378
379 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
380 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
381
382 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
383 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
384 int num);
385 #define ipw_write_indirect(a, b, c, d) \
386 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
387 _ipw_write_indirect(a, b, c, d)
388
389 /* 32-bit indirect write (above 4K) */
390 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
391 {
392 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
393 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
394 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 }
396
397 /* 8-bit indirect write (above 4K) */
398 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
399 {
400 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
401 u32 dif_len = reg - aligned_addr;
402
403 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
404 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
405 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 }
407
408 /* 16-bit indirect write (above 4K) */
409 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
410 {
411 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
412 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
413
414 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
415 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
416 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 }
418
419 /* 8-bit indirect read (above 4K) */
420 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
421 {
422 u32 word;
423 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
424 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
425 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
426 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 }
428
429 /* 32-bit indirect read (above 4K) */
430 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
431 {
432 u32 value;
433
434 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
435
436 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
437 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
438 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
439 return value;
440 }
441
442 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
443 /* for area above 1st 4K of SRAM/reg space */
444 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
445 int num)
446 {
447 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
448 u32 dif_len = addr - aligned_addr;
449 u32 i;
450
451 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
452
453 if (num <= 0) {
454 return;
455 }
456
457 /* Read the first dword (or portion) byte by byte */
458 if (unlikely(dif_len)) {
459 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
460 /* Start reading at aligned_addr + dif_len */
461 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
462 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
463 aligned_addr += 4;
464 }
465
466 /* Read all of the middle dwords as dwords, with auto-increment */
467 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
468 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
469 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
470
471 /* Read the last dword (or portion) byte by byte */
472 if (unlikely(num)) {
473 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
474 for (i = 0; num > 0; i++, num--)
475 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
476 }
477 }
478
479 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
480 /* for area above 1st 4K of SRAM/reg space */
481 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
482 int num)
483 {
484 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
485 u32 dif_len = addr - aligned_addr;
486 u32 i;
487
488 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
489
490 if (num <= 0) {
491 return;
492 }
493
494 /* Write the first dword (or portion) byte by byte */
495 if (unlikely(dif_len)) {
496 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
497 /* Start writing at aligned_addr + dif_len */
498 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
499 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
500 aligned_addr += 4;
501 }
502
503 /* Write all of the middle dwords as dwords, with auto-increment */
504 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
505 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
506 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
507
508 /* Write the last dword (or portion) byte by byte */
509 if (unlikely(num)) {
510 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
511 for (i = 0; num > 0; i++, num--, buf++)
512 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
513 }
514 }
515
516 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
517 /* for 1st 4K of SRAM/regs space */
518 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
519 int num)
520 {
521 memcpy_toio((priv->hw_base + addr), buf, num);
522 }
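
/*
 * Illustrative sketch (not part of the original driver): the two access
 * paths defined above.  Offsets within the first 4K of SRAM/register
 * space are memory-mapped and accessed directly (ipw_read32/ipw_write32);
 * offsets above 4K go through the IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA
 * window (ipw_read_reg32/ipw_write_reg32 and the multi-byte
 * ipw_read_indirect/ipw_write_indirect).  The register choices below only
 * mirror how they are used elsewhere in this file.
 */
#if 0
static void example_register_access(struct ipw_priv *priv)
{
	u32 inta, event;
	u8 sram_copy[32];

	/* Low 4K: direct MMIO read and write. */
	inta = ipw_read32(priv, IPW_INTA_RW);
	ipw_write32(priv, IPW_INTA_RW, inta);

	/* Above 4K: indirect access through the address/data window. */
	event = ipw_read_reg32(priv, IPW_EVENT_REG);
	ipw_write_reg32(priv, IPW_EVENT_REG, event);

	/* Arbitrary length/alignment copy from SRAM above 4K. */
	ipw_read_indirect(priv, IPW_EVENT_REG, sram_copy, sizeof(sram_copy));
}
#endif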
523
524 /* Set bit(s) in low 4K of SRAM/regs */
525 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
526 {
527 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 }
529
530 /* Clear bit(s) in low 4K of SRAM/regs */
531 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
532 {
533 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 }
535
536 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
537 {
538 if (priv->status & STATUS_INT_ENABLED)
539 return;
540 priv->status |= STATUS_INT_ENABLED;
541 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 }
543
544 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
545 {
546 if (!(priv->status & STATUS_INT_ENABLED))
547 return;
548 priv->status &= ~STATUS_INT_ENABLED;
549 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 }
551
552 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
553 {
554 unsigned long flags;
555
556 spin_lock_irqsave(&priv->irq_lock, flags);
557 __ipw_enable_interrupts(priv);
558 spin_unlock_irqrestore(&priv->irq_lock, flags);
559 }
560
561 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
562 {
563 unsigned long flags;
564
565 spin_lock_irqsave(&priv->irq_lock, flags);
566 __ipw_disable_interrupts(priv);
567 spin_unlock_irqrestore(&priv->irq_lock, flags);
568 }
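
/*
 * Illustrative sketch (not part of the original driver): the
 * double-underscore helpers above expect priv->irq_lock to already be
 * held, while the plain wrappers take and release the lock themselves.
 * Code that is already inside the lock must use the __-prefixed variant
 * to avoid a self-deadlock.
 */
#if 0
static void example_toggle_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_disable_interrupts(priv);	/* irq_lock already held */
	__ipw_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);

	ipw_disable_interrupts(priv);	/* takes irq_lock internally */
	ipw_enable_interrupts(priv);
}
#endif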
569
570 #ifdef CONFIG_IPW2200_DEBUG
571 static char *ipw_error_desc(u32 val)
572 {
573 switch (val) {
574 case IPW_FW_ERROR_OK:
575 return "ERROR_OK";
576 case IPW_FW_ERROR_FAIL:
577 return "ERROR_FAIL";
578 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
579 return "MEMORY_UNDERFLOW";
580 case IPW_FW_ERROR_MEMORY_OVERFLOW:
581 return "MEMORY_OVERFLOW";
582 case IPW_FW_ERROR_BAD_PARAM:
583 return "BAD_PARAM";
584 case IPW_FW_ERROR_BAD_CHECKSUM:
585 return "BAD_CHECKSUM";
586 case IPW_FW_ERROR_NMI_INTERRUPT:
587 return "NMI_INTERRUPT";
588 case IPW_FW_ERROR_BAD_DATABASE:
589 return "BAD_DATABASE";
590 case IPW_FW_ERROR_ALLOC_FAIL:
591 return "ALLOC_FAIL";
592 case IPW_FW_ERROR_DMA_UNDERRUN:
593 return "DMA_UNDERRUN";
594 case IPW_FW_ERROR_DMA_STATUS:
595 return "DMA_STATUS";
596 case IPW_FW_ERROR_DINO_ERROR:
597 return "DINO_ERROR";
598 case IPW_FW_ERROR_EEPROM_ERROR:
599 return "EEPROM_ERROR";
600 case IPW_FW_ERROR_SYSASSERT:
601 return "SYSASSERT";
602 case IPW_FW_ERROR_FATAL_ERROR:
603 return "FATAL_ERROR";
604 default:
605 return "UNKNOWN_ERROR";
606 }
607 }
608
609 static void ipw_dump_error_log(struct ipw_priv *priv,
610 struct ipw_fw_error *error)
611 {
612 u32 i;
613
614 if (!error) {
615 IPW_ERROR("Error allocating and capturing error log. "
616 "Nothing to dump.\n");
617 return;
618 }
619
620 IPW_ERROR("Start IPW Error Log Dump:\n");
621 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
622 error->status, error->config);
623
624 for (i = 0; i < error->elem_len; i++)
625 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
626 ipw_error_desc(error->elem[i].desc),
627 error->elem[i].time,
628 error->elem[i].blink1,
629 error->elem[i].blink2,
630 error->elem[i].link1,
631 error->elem[i].link2, error->elem[i].data);
632 for (i = 0; i < error->log_len; i++)
633 IPW_ERROR("%i\t0x%08x\t%i\n",
634 error->log[i].time,
635 error->log[i].data, error->log[i].event);
636 }
637 #endif
638
639 static inline int ipw_is_init(struct ipw_priv *priv)
640 {
641 return (priv->status & STATUS_INIT) ? 1 : 0;
642 }
643
644 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
645 {
646 u32 addr, field_info, field_len, field_count, total_len;
647
648 IPW_DEBUG_ORD("ordinal = %i\n", ord);
649
650 if (!priv || !val || !len) {
651 IPW_DEBUG_ORD("Invalid argument\n");
652 return -EINVAL;
653 }
654
655 /* verify device ordinal tables have been initialized */
656 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
657 IPW_DEBUG_ORD("Access ordinals before initialization\n");
658 return -EINVAL;
659 }
660
661 switch (IPW_ORD_TABLE_ID_MASK & ord) {
662 case IPW_ORD_TABLE_0_MASK:
663 /*
664 * TABLE 0: Direct access to a table of 32 bit values
665 *
666 * This is a very simple table with the data directly
667 * read from the table
668 */
669
670 /* remove the table id from the ordinal */
671 ord &= IPW_ORD_TABLE_VALUE_MASK;
672
673 /* boundary check */
674 if (ord > priv->table0_len) {
675 IPW_DEBUG_ORD("ordinal value (%i) longer than "
676 "max (%i)\n", ord, priv->table0_len);
677 return -EINVAL;
678 }
679
680 /* verify we have enough room to store the value */
681 if (*len < sizeof(u32)) {
682 IPW_DEBUG_ORD("ordinal buffer length too small, "
683 "need %zd\n", sizeof(u32));
684 return -EINVAL;
685 }
686
687 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
688 ord, priv->table0_addr + (ord << 2));
689
690 *len = sizeof(u32);
691 ord <<= 2;
692 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
693 break;
694
695 case IPW_ORD_TABLE_1_MASK:
696 /*
697 * TABLE 1: Indirect access to a table of 32 bit values
698 *
699 * This is a fairly large table of u32 values each
700 * representing starting addr for the data (which is
701 * also a u32)
702 */
703
704 /* remove the table id from the ordinal */
705 ord &= IPW_ORD_TABLE_VALUE_MASK;
706
707 /* boundary check */
708 if (ord > priv->table1_len) {
709 IPW_DEBUG_ORD("ordinal value too long\n");
710 return -EINVAL;
711 }
712
713 /* verify we have enough room to store the value */
714 if (*len < sizeof(u32)) {
715 IPW_DEBUG_ORD("ordinal buffer length too small, "
716 "need %zd\n", sizeof(u32));
717 return -EINVAL;
718 }
719
720 *((u32 *) val) =
721 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
722 *len = sizeof(u32);
723 break;
724
725 case IPW_ORD_TABLE_2_MASK:
726 /*
727 * TABLE 2: Indirect access to a table of variable sized values
728 *
729 * This table consists of six values, each containing
730 * - dword containing the starting offset of the data
731 * - dword containing the length in the first 16 bits
732 * and the count in the second 16 bits
733 */
734
735 /* remove the table id from the ordinal */
736 ord &= IPW_ORD_TABLE_VALUE_MASK;
737
738 /* boundary check */
739 if (ord > priv->table2_len) {
740 IPW_DEBUG_ORD("ordinal value too long\n");
741 return -EINVAL;
742 }
743
744 /* get the address of statistic */
745 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
746
747 /* get the second DW of statistics;
748 * two 16-bit words - first is length, second is count */
749 field_info =
750 ipw_read_reg32(priv,
751 priv->table2_addr + (ord << 3) +
752 sizeof(u32));
753
754 /* get each entry length */
755 field_len = *((u16 *) & field_info);
756
757 /* get number of entries */
758 field_count = *(((u16 *) & field_info) + 1);
759
760 /* abort if not enough memory */
761 total_len = field_len * field_count;
762 if (total_len > *len) {
763 *len = total_len;
764 return -EINVAL;
765 }
766
767 *len = total_len;
768 if (!total_len)
769 return 0;
770
771 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
772 "field_info = 0x%08x\n",
773 addr, total_len, field_info);
774 ipw_read_indirect(priv, addr, val, total_len);
775 break;
776
777 default:
778 IPW_DEBUG_ORD("Invalid ordinal!\n");
779 return -EINVAL;
780
781 }
782
783 return 0;
784 }
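
/*
 * Illustrative sketch (not part of the original driver): reading a single
 * TABLE 0 ordinal with ipw_get_ordinal().  The caller passes its buffer
 * size in *len and, on success, *len is updated to the number of bytes
 * written.  IPW_ORD_STAT_UCODE_VERSION is used the same way by the
 * ucode_version sysfs attribute further down in this file.
 */
#if 0
static int example_read_ucode_version(struct ipw_priv *priv, u32 *version)
{
	u32 len = sizeof(u32);

	return ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION,
			       version, &len);
}
#endif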
785
786 static void ipw_init_ordinals(struct ipw_priv *priv)
787 {
788 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
789 priv->table0_len = ipw_read32(priv, priv->table0_addr);
790
791 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
792 priv->table0_addr, priv->table0_len);
793
794 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
795 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
796
797 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
798 priv->table1_addr, priv->table1_len);
799
800 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
801 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
802 priv->table2_len &= 0x0000ffff; /* use first two bytes */
803
804 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
805 priv->table2_addr, priv->table2_len);
806
807 }
808
809 static u32 ipw_register_toggle(u32 reg)
810 {
811 reg &= ~IPW_START_STANDBY;
812 if (reg & IPW_GATE_ODMA)
813 reg &= ~IPW_GATE_ODMA;
814 if (reg & IPW_GATE_IDMA)
815 reg &= ~IPW_GATE_IDMA;
816 if (reg & IPW_GATE_ADMA)
817 reg &= ~IPW_GATE_ADMA;
818 return reg;
819 }
820
821 /*
822 * LED behavior:
823 * - On radio ON, turn on any LEDs that need to be on during startup
824 * - On initialization, start unassociated blink
825 * - On association, disable unassociated blink
826 * - On disassociation, start unassociated blink
827 * - On radio OFF, turn off any LEDs started during radio on
828 *
829 */
830 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
831 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
832 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
833
834 static void ipw_led_link_on(struct ipw_priv *priv)
835 {
836 unsigned long flags;
837 u32 led;
838
839 /* If configured to not use LEDs, or nic_type is 1,
840 * then we don't toggle a LINK led */
841 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
842 return;
843
844 spin_lock_irqsave(&priv->lock, flags);
845
846 if (!(priv->status & STATUS_RF_KILL_MASK) &&
847 !(priv->status & STATUS_LED_LINK_ON)) {
848 IPW_DEBUG_LED("Link LED On\n");
849 led = ipw_read_reg32(priv, IPW_EVENT_REG);
850 led |= priv->led_association_on;
851
852 led = ipw_register_toggle(led);
853
854 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
855 ipw_write_reg32(priv, IPW_EVENT_REG, led);
856
857 priv->status |= STATUS_LED_LINK_ON;
858
859 /* If we aren't associated, schedule turning the LED off */
860 if (!(priv->status & STATUS_ASSOCIATED))
861 queue_delayed_work(priv->workqueue,
862 &priv->led_link_off,
863 LD_TIME_LINK_ON);
864 }
865
866 spin_unlock_irqrestore(&priv->lock, flags);
867 }
868
869 static void ipw_bg_led_link_on(void *data)
870 {
871 struct ipw_priv *priv = data;
872 mutex_lock(&priv->mutex);
873 ipw_led_link_on(data);
874 mutex_unlock(&priv->mutex);
875 }
876
877 static void ipw_led_link_off(struct ipw_priv *priv)
878 {
879 unsigned long flags;
880 u32 led;
881
882 /* If configured not to use LEDs, or nic type is 1,
883 * then we don't toggle the LINK led. */
884 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
885 return;
886
887 spin_lock_irqsave(&priv->lock, flags);
888
889 if (priv->status & STATUS_LED_LINK_ON) {
890 led = ipw_read_reg32(priv, IPW_EVENT_REG);
891 led &= priv->led_association_off;
892 led = ipw_register_toggle(led);
893
894 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
895 ipw_write_reg32(priv, IPW_EVENT_REG, led);
896
897 IPW_DEBUG_LED("Link LED Off\n");
898
899 priv->status &= ~STATUS_LED_LINK_ON;
900
901 /* If we aren't associated and the radio is on, schedule
902 * turning the LED on (blink while unassociated) */
903 if (!(priv->status & STATUS_RF_KILL_MASK) &&
904 !(priv->status & STATUS_ASSOCIATED))
905 queue_delayed_work(priv->workqueue, &priv->led_link_on,
906 LD_TIME_LINK_OFF);
907
908 }
909
910 spin_unlock_irqrestore(&priv->lock, flags);
911 }
912
913 static void ipw_bg_led_link_off(void *data)
914 {
915 struct ipw_priv *priv = data;
916 mutex_lock(&priv->mutex);
917 ipw_led_link_off(data);
918 mutex_unlock(&priv->mutex);
919 }
920
921 static void __ipw_led_activity_on(struct ipw_priv *priv)
922 {
923 u32 led;
924
925 if (priv->config & CFG_NO_LED)
926 return;
927
928 if (priv->status & STATUS_RF_KILL_MASK)
929 return;
930
931 if (!(priv->status & STATUS_LED_ACT_ON)) {
932 led = ipw_read_reg32(priv, IPW_EVENT_REG);
933 led |= priv->led_activity_on;
934
935 led = ipw_register_toggle(led);
936
937 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
938 ipw_write_reg32(priv, IPW_EVENT_REG, led);
939
940 IPW_DEBUG_LED("Activity LED On\n");
941
942 priv->status |= STATUS_LED_ACT_ON;
943
944 cancel_delayed_work(&priv->led_act_off);
945 queue_delayed_work(priv->workqueue, &priv->led_act_off,
946 LD_TIME_ACT_ON);
947 } else {
948 /* Reschedule LED off for full time period */
949 cancel_delayed_work(&priv->led_act_off);
950 queue_delayed_work(priv->workqueue, &priv->led_act_off,
951 LD_TIME_ACT_ON);
952 }
953 }
954
955 #if 0
956 void ipw_led_activity_on(struct ipw_priv *priv)
957 {
958 unsigned long flags;
959 spin_lock_irqsave(&priv->lock, flags);
960 __ipw_led_activity_on(priv);
961 spin_unlock_irqrestore(&priv->lock, flags);
962 }
963 #endif /* 0 */
964
965 static void ipw_led_activity_off(struct ipw_priv *priv)
966 {
967 unsigned long flags;
968 u32 led;
969
970 if (priv->config & CFG_NO_LED)
971 return;
972
973 spin_lock_irqsave(&priv->lock, flags);
974
975 if (priv->status & STATUS_LED_ACT_ON) {
976 led = ipw_read_reg32(priv, IPW_EVENT_REG);
977 led &= priv->led_activity_off;
978
979 led = ipw_register_toggle(led);
980
981 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
982 ipw_write_reg32(priv, IPW_EVENT_REG, led);
983
984 IPW_DEBUG_LED("Activity LED Off\n");
985
986 priv->status &= ~STATUS_LED_ACT_ON;
987 }
988
989 spin_unlock_irqrestore(&priv->lock, flags);
990 }
991
992 static void ipw_bg_led_activity_off(void *data)
993 {
994 struct ipw_priv *priv = data;
995 mutex_lock(&priv->mutex);
996 ipw_led_activity_off(data);
997 mutex_unlock(&priv->mutex);
998 }
999
1000 static void ipw_led_band_on(struct ipw_priv *priv)
1001 {
1002 unsigned long flags;
1003 u32 led;
1004
1005 /* Only nic type 1 supports mode LEDs */
1006 if (priv->config & CFG_NO_LED ||
1007 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1008 return;
1009
1010 spin_lock_irqsave(&priv->lock, flags);
1011
1012 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1013 if (priv->assoc_network->mode == IEEE_A) {
1014 led |= priv->led_ofdm_on;
1015 led &= priv->led_association_off;
1016 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1017 } else if (priv->assoc_network->mode == IEEE_G) {
1018 led |= priv->led_ofdm_on;
1019 led |= priv->led_association_on;
1020 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1021 } else {
1022 led &= priv->led_ofdm_off;
1023 led |= priv->led_association_on;
1024 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1025 }
1026
1027 led = ipw_register_toggle(led);
1028
1029 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1030 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1031
1032 spin_unlock_irqrestore(&priv->lock, flags);
1033 }
1034
1035 static void ipw_led_band_off(struct ipw_priv *priv)
1036 {
1037 unsigned long flags;
1038 u32 led;
1039
1040 /* Only nic type 1 supports mode LEDs */
1041 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1042 return;
1043
1044 spin_lock_irqsave(&priv->lock, flags);
1045
1046 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1047 led &= priv->led_ofdm_off;
1048 led &= priv->led_association_off;
1049
1050 led = ipw_register_toggle(led);
1051
1052 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1053 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1054
1055 spin_unlock_irqrestore(&priv->lock, flags);
1056 }
1057
1058 static void ipw_led_radio_on(struct ipw_priv *priv)
1059 {
1060 ipw_led_link_on(priv);
1061 }
1062
1063 static void ipw_led_radio_off(struct ipw_priv *priv)
1064 {
1065 ipw_led_activity_off(priv);
1066 ipw_led_link_off(priv);
1067 }
1068
1069 static void ipw_led_link_up(struct ipw_priv *priv)
1070 {
1071 /* Set the Link Led on for all nic types */
1072 ipw_led_link_on(priv);
1073 }
1074
1075 static void ipw_led_link_down(struct ipw_priv *priv)
1076 {
1077 ipw_led_activity_off(priv);
1078 ipw_led_link_off(priv);
1079
1080 if (priv->status & STATUS_RF_KILL_MASK)
1081 ipw_led_radio_off(priv);
1082 }
1083
1084 static void ipw_led_init(struct ipw_priv *priv)
1085 {
1086 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1087
1088 /* Set the default PINs for the link and activity leds */
1089 priv->led_activity_on = IPW_ACTIVITY_LED;
1090 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1091
1092 priv->led_association_on = IPW_ASSOCIATED_LED;
1093 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1094
1095 /* Set the default PINs for the OFDM leds */
1096 priv->led_ofdm_on = IPW_OFDM_LED;
1097 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1098
1099 switch (priv->nic_type) {
1100 case EEPROM_NIC_TYPE_1:
1101 /* In this NIC type, the LEDs are reversed.... */
1102 priv->led_activity_on = IPW_ASSOCIATED_LED;
1103 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1104 priv->led_association_on = IPW_ACTIVITY_LED;
1105 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1106
1107 if (!(priv->config & CFG_NO_LED))
1108 ipw_led_band_on(priv);
1109
1110 /* And we don't blink link LEDs for this nic, so
1111 * just return here */
1112 return;
1113
1114 case EEPROM_NIC_TYPE_3:
1115 case EEPROM_NIC_TYPE_2:
1116 case EEPROM_NIC_TYPE_4:
1117 case EEPROM_NIC_TYPE_0:
1118 break;
1119
1120 default:
1121 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1122 priv->nic_type);
1123 priv->nic_type = EEPROM_NIC_TYPE_0;
1124 break;
1125 }
1126
1127 if (!(priv->config & CFG_NO_LED)) {
1128 if (priv->status & STATUS_ASSOCIATED)
1129 ipw_led_link_on(priv);
1130 else
1131 ipw_led_link_off(priv);
1132 }
1133 }
1134
1135 static void ipw_led_shutdown(struct ipw_priv *priv)
1136 {
1137 ipw_led_activity_off(priv);
1138 ipw_led_link_off(priv);
1139 ipw_led_band_off(priv);
1140 cancel_delayed_work(&priv->led_link_on);
1141 cancel_delayed_work(&priv->led_link_off);
1142 cancel_delayed_work(&priv->led_act_off);
1143 }
1144
1145 /*
1146 * The following adds a new attribute to the sysfs representation
1147 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1148 * used for controlling the debug level.
1149 *
1150 * See the level definitions in ipw for details.
1151 */
1152 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1153 {
1154 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1155 }
1156
1157 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1158 size_t count)
1159 {
1160 char *p = (char *)buf;
1161 u32 val;
1162
1163 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1164 p++;
1165 if (p[0] == 'x' || p[0] == 'X')
1166 p++;
1167 val = simple_strtoul(p, &p, 16);
1168 } else
1169 val = simple_strtoul(p, &p, 10);
1170 if (p == buf)
1171 printk(KERN_INFO DRV_NAME
1172 ": %s is not in hex or decimal form.\n", buf);
1173 else
1174 ipw_debug_level = val;
1175
1176 return strnlen(buf, count);
1177 }
1178
1179 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1180 show_debug_level, store_debug_level);
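
/*
 * Illustrative sketch (not part of the original driver): the parsing
 * convention used by store_debug_level() above (and again by
 * store_scan_age() further down).  A value written as "0x..." or "x..."
 * is treated as hex, anything else as decimal.  The helper below is
 * hypothetical and slightly simplified.
 */
#if 0
static u32 example_parse_user_value(const char *buf)
{
	const char *p = buf;

	if (p[0] == '0' && (p[1] == 'x' || p[1] == 'X'))
		return simple_strtoul(p + 2, NULL, 16);
	if (p[0] == 'x' || p[0] == 'X')
		return simple_strtoul(p + 1, NULL, 16);
	return simple_strtoul(p, NULL, 10);
}
#endif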
1181
1182 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1183 {
1184 /* length = 1st dword in log */
1185 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1186 }
1187
1188 static void ipw_capture_event_log(struct ipw_priv *priv,
1189 u32 log_len, struct ipw_event *log)
1190 {
1191 u32 base;
1192
1193 if (log_len) {
1194 base = ipw_read32(priv, IPW_EVENT_LOG);
1195 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1196 (u8 *) log, sizeof(*log) * log_len);
1197 }
1198 }
1199
1200 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1201 {
1202 struct ipw_fw_error *error;
1203 u32 log_len = ipw_get_event_log_len(priv);
1204 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1205 u32 elem_len = ipw_read_reg32(priv, base);
1206
1207 error = kmalloc(sizeof(*error) +
1208 sizeof(*error->elem) * elem_len +
1209 sizeof(*error->log) * log_len, GFP_ATOMIC);
1210 if (!error) {
1211 IPW_ERROR("Memory allocation for firmware error log "
1212 "failed.\n");
1213 return NULL;
1214 }
1215 error->jiffies = jiffies;
1216 error->status = priv->status;
1217 error->config = priv->config;
1218 error->elem_len = elem_len;
1219 error->log_len = log_len;
1220 error->elem = (struct ipw_error_elem *)error->payload;
1221 error->log = (struct ipw_event *)(error->elem + elem_len);
1222
1223 ipw_capture_event_log(priv, log_len, error->log);
1224
1225 if (elem_len)
1226 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1227 sizeof(*error->elem) * elem_len);
1228
1229 return error;
1230 }
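
/*
 * Illustrative sketch (not part of the original driver): the error log is
 * captured into a single kmalloc'd blob -- the ipw_fw_error header is
 * followed in memory by elem_len ipw_error_elem entries and then log_len
 * ipw_event entries, so one kfree() releases everything.  This mirrors
 * the firmware-error path in the interrupt tasklet below
 * (ipw_dump_error_log is only built with CONFIG_IPW2200_DEBUG).
 */
#if 0
static void example_capture_and_dump_error(struct ipw_priv *priv)
{
	struct ipw_fw_error *error = ipw_alloc_error_log(priv);

	if (!error)
		return;
	ipw_dump_error_log(priv, error);
	kfree(error);
}
#endif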
1231
1232 static ssize_t show_event_log(struct device *d,
1233 struct device_attribute *attr, char *buf)
1234 {
1235 struct ipw_priv *priv = dev_get_drvdata(d);
1236 u32 log_len = ipw_get_event_log_len(priv);
1237 struct ipw_event log[log_len];
1238 u32 len = 0, i;
1239
1240 ipw_capture_event_log(priv, log_len, log);
1241
1242 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1243 for (i = 0; i < log_len; i++)
1244 len += snprintf(buf + len, PAGE_SIZE - len,
1245 "\n%08X%08X%08X",
1246 log[i].time, log[i].event, log[i].data);
1247 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1248 return len;
1249 }
1250
1251 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1252
1253 static ssize_t show_error(struct device *d,
1254 struct device_attribute *attr, char *buf)
1255 {
1256 struct ipw_priv *priv = dev_get_drvdata(d);
1257 u32 len = 0, i;
1258 if (!priv->error)
1259 return 0;
1260 len += snprintf(buf + len, PAGE_SIZE - len,
1261 "%08lX%08X%08X%08X",
1262 priv->error->jiffies,
1263 priv->error->status,
1264 priv->error->config, priv->error->elem_len);
1265 for (i = 0; i < priv->error->elem_len; i++)
1266 len += snprintf(buf + len, PAGE_SIZE - len,
1267 "\n%08X%08X%08X%08X%08X%08X%08X",
1268 priv->error->elem[i].time,
1269 priv->error->elem[i].desc,
1270 priv->error->elem[i].blink1,
1271 priv->error->elem[i].blink2,
1272 priv->error->elem[i].link1,
1273 priv->error->elem[i].link2,
1274 priv->error->elem[i].data);
1275
1276 len += snprintf(buf + len, PAGE_SIZE - len,
1277 "\n%08X", priv->error->log_len);
1278 for (i = 0; i < priv->error->log_len; i++)
1279 len += snprintf(buf + len, PAGE_SIZE - len,
1280 "\n%08X%08X%08X",
1281 priv->error->log[i].time,
1282 priv->error->log[i].event,
1283 priv->error->log[i].data);
1284 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1285 return len;
1286 }
1287
1288 static ssize_t clear_error(struct device *d,
1289 struct device_attribute *attr,
1290 const char *buf, size_t count)
1291 {
1292 struct ipw_priv *priv = dev_get_drvdata(d);
1293
1294 kfree(priv->error);
1295 priv->error = NULL;
1296 return count;
1297 }
1298
1299 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1300
1301 static ssize_t show_cmd_log(struct device *d,
1302 struct device_attribute *attr, char *buf)
1303 {
1304 struct ipw_priv *priv = dev_get_drvdata(d);
1305 u32 len = 0, i;
1306 if (!priv->cmdlog)
1307 return 0;
1308 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1309 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1310 i = (i + 1) % priv->cmdlog_len) {
1311 len +=
1312 snprintf(buf + len, PAGE_SIZE - len,
1313 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1314 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1315 priv->cmdlog[i].cmd.len);
1316 len +=
1317 snprintk_buf(buf + len, PAGE_SIZE - len,
1318 (u8 *) priv->cmdlog[i].cmd.param,
1319 priv->cmdlog[i].cmd.len);
1320 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1321 }
1322 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1323 return len;
1324 }
1325
1326 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1327
1328 #ifdef CONFIG_IPW2200_PROMISCUOUS
1329 static void ipw_prom_free(struct ipw_priv *priv);
1330 static int ipw_prom_alloc(struct ipw_priv *priv);
1331 static ssize_t store_rtap_iface(struct device *d,
1332 struct device_attribute *attr,
1333 const char *buf, size_t count)
1334 {
1335 struct ipw_priv *priv = dev_get_drvdata(d);
1336 int rc = 0;
1337
1338 if (count < 1)
1339 return -EINVAL;
1340
1341 switch (buf[0]) {
1342 case '0':
1343 if (!rtap_iface)
1344 return count;
1345
1346 if (netif_running(priv->prom_net_dev)) {
1347 IPW_WARNING("Interface is up. Cannot unregister.\n");
1348 return count;
1349 }
1350
1351 ipw_prom_free(priv);
1352 rtap_iface = 0;
1353 break;
1354
1355 case '1':
1356 if (rtap_iface)
1357 return count;
1358
1359 rc = ipw_prom_alloc(priv);
1360 if (!rc)
1361 rtap_iface = 1;
1362 break;
1363
1364 default:
1365 return -EINVAL;
1366 }
1367
1368 if (rc) {
1369 IPW_ERROR("Failed to register promiscuous network "
1370 "device (error %d).\n", rc);
1371 }
1372
1373 return count;
1374 }
1375
1376 static ssize_t show_rtap_iface(struct device *d,
1377 struct device_attribute *attr,
1378 char *buf)
1379 {
1380 struct ipw_priv *priv = dev_get_drvdata(d);
1381 if (rtap_iface)
1382 return sprintf(buf, "%s", priv->prom_net_dev->name);
1383 else {
1384 buf[0] = '-';
1385 buf[1] = '1';
1386 buf[2] = '\0';
1387 return 3;
1388 }
1389 }
1390
1391 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1392 store_rtap_iface);
1393
1394 static ssize_t store_rtap_filter(struct device *d,
1395 struct device_attribute *attr,
1396 const char *buf, size_t count)
1397 {
1398 struct ipw_priv *priv = dev_get_drvdata(d);
1399
1400 if (!priv->prom_priv) {
1401 IPW_ERROR("Attempting to set filter without "
1402 "rtap_iface enabled.\n");
1403 return -EPERM;
1404 }
1405
1406 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1407
1408 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1409 BIT_ARG16(priv->prom_priv->filter));
1410
1411 return count;
1412 }
1413
1414 static ssize_t show_rtap_filter(struct device *d,
1415 struct device_attribute *attr,
1416 char *buf)
1417 {
1418 struct ipw_priv *priv = dev_get_drvdata(d);
1419 return sprintf(buf, "0x%04X",
1420 priv->prom_priv ? priv->prom_priv->filter : 0);
1421 }
1422
1423 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1424 store_rtap_filter);
1425 #endif
1426
1427 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1428 char *buf)
1429 {
1430 struct ipw_priv *priv = dev_get_drvdata(d);
1431 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1432 }
1433
1434 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1435 const char *buf, size_t count)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 #ifdef CONFIG_IPW2200_DEBUG
1439 struct net_device *dev = priv->net_dev;
1440 #endif
1441 char buffer[] = "00000000";
1442 unsigned long len =
1443 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1444 unsigned long val;
1445 char *p = buffer;
1446
1447 IPW_DEBUG_INFO("enter\n");
1448
1449 strncpy(buffer, buf, len);
1450 buffer[len] = 0;
1451
1452 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1453 p++;
1454 if (p[0] == 'x' || p[0] == 'X')
1455 p++;
1456 val = simple_strtoul(p, &p, 16);
1457 } else
1458 val = simple_strtoul(p, &p, 10);
1459 if (p == buffer) {
1460 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1461 } else {
1462 priv->ieee->scan_age = val;
1463 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1464 }
1465
1466 IPW_DEBUG_INFO("exit\n");
1467 return len;
1468 }
1469
1470 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1471
1472 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1477 }
1478
1479 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1480 const char *buf, size_t count)
1481 {
1482 struct ipw_priv *priv = dev_get_drvdata(d);
1483
1484 IPW_DEBUG_INFO("enter\n");
1485
1486 if (count == 0)
1487 return 0;
1488
1489 if (*buf == 0) {
1490 IPW_DEBUG_LED("Disabling LED control.\n");
1491 priv->config |= CFG_NO_LED;
1492 ipw_led_shutdown(priv);
1493 } else {
1494 IPW_DEBUG_LED("Enabling LED control.\n");
1495 priv->config &= ~CFG_NO_LED;
1496 ipw_led_init(priv);
1497 }
1498
1499 IPW_DEBUG_INFO("exit\n");
1500 return count;
1501 }
1502
1503 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1504
1505 static ssize_t show_status(struct device *d,
1506 struct device_attribute *attr, char *buf)
1507 {
1508 struct ipw_priv *p = d->driver_data;
1509 return sprintf(buf, "0x%08x\n", (int)p->status);
1510 }
1511
1512 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1513
1514 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1515 char *buf)
1516 {
1517 struct ipw_priv *p = d->driver_data;
1518 return sprintf(buf, "0x%08x\n", (int)p->config);
1519 }
1520
1521 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1522
1523 static ssize_t show_nic_type(struct device *d,
1524 struct device_attribute *attr, char *buf)
1525 {
1526 struct ipw_priv *priv = d->driver_data;
1527 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1528 }
1529
1530 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1531
1532 static ssize_t show_ucode_version(struct device *d,
1533 struct device_attribute *attr, char *buf)
1534 {
1535 u32 len = sizeof(u32), tmp = 0;
1536 struct ipw_priv *p = d->driver_data;
1537
1538 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1539 return 0;
1540
1541 return sprintf(buf, "0x%08x\n", tmp);
1542 }
1543
1544 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1545
1546 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1547 char *buf)
1548 {
1549 u32 len = sizeof(u32), tmp = 0;
1550 struct ipw_priv *p = d->driver_data;
1551
1552 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1553 return 0;
1554
1555 return sprintf(buf, "0x%08x\n", tmp);
1556 }
1557
1558 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1559
1560 /*
1561 * Add a device attribute to view/control the delay between eeprom
1562 * operations.
1563 */
1564 static ssize_t show_eeprom_delay(struct device *d,
1565 struct device_attribute *attr, char *buf)
1566 {
1567 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1568 return sprintf(buf, "%i\n", n);
1569 }
1570 static ssize_t store_eeprom_delay(struct device *d,
1571 struct device_attribute *attr,
1572 const char *buf, size_t count)
1573 {
1574 struct ipw_priv *p = d->driver_data;
1575 sscanf(buf, "%i", &p->eeprom_delay);
1576 return strnlen(buf, count);
1577 }
1578
1579 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1580 show_eeprom_delay, store_eeprom_delay);
1581
1582 static ssize_t show_command_event_reg(struct device *d,
1583 struct device_attribute *attr, char *buf)
1584 {
1585 u32 reg = 0;
1586 struct ipw_priv *p = d->driver_data;
1587
1588 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1589 return sprintf(buf, "0x%08x\n", reg);
1590 }
1591 static ssize_t store_command_event_reg(struct device *d,
1592 struct device_attribute *attr,
1593 const char *buf, size_t count)
1594 {
1595 u32 reg;
1596 struct ipw_priv *p = d->driver_data;
1597
1598 sscanf(buf, "%x", &reg);
1599 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1600 return strnlen(buf, count);
1601 }
1602
1603 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1604 show_command_event_reg, store_command_event_reg);
1605
1606 static ssize_t show_mem_gpio_reg(struct device *d,
1607 struct device_attribute *attr, char *buf)
1608 {
1609 u32 reg = 0;
1610 struct ipw_priv *p = d->driver_data;
1611
1612 reg = ipw_read_reg32(p, 0x301100);
1613 return sprintf(buf, "0x%08x\n", reg);
1614 }
1615 static ssize_t store_mem_gpio_reg(struct device *d,
1616 struct device_attribute *attr,
1617 const char *buf, size_t count)
1618 {
1619 u32 reg;
1620 struct ipw_priv *p = d->driver_data;
1621
1622 sscanf(buf, "%x", &reg);
1623 ipw_write_reg32(p, 0x301100, reg);
1624 return strnlen(buf, count);
1625 }
1626
1627 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1628 show_mem_gpio_reg, store_mem_gpio_reg);
1629
1630 static ssize_t show_indirect_dword(struct device *d,
1631 struct device_attribute *attr, char *buf)
1632 {
1633 u32 reg = 0;
1634 struct ipw_priv *priv = d->driver_data;
1635
1636 if (priv->status & STATUS_INDIRECT_DWORD)
1637 reg = ipw_read_reg32(priv, priv->indirect_dword);
1638 else
1639 reg = 0;
1640
1641 return sprintf(buf, "0x%08x\n", reg);
1642 }
1643 static ssize_t store_indirect_dword(struct device *d,
1644 struct device_attribute *attr,
1645 const char *buf, size_t count)
1646 {
1647 struct ipw_priv *priv = d->driver_data;
1648
1649 sscanf(buf, "%x", &priv->indirect_dword);
1650 priv->status |= STATUS_INDIRECT_DWORD;
1651 return strnlen(buf, count);
1652 }
1653
1654 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1655 show_indirect_dword, store_indirect_dword);
1656
1657 static ssize_t show_indirect_byte(struct device *d,
1658 struct device_attribute *attr, char *buf)
1659 {
1660 u8 reg = 0;
1661 struct ipw_priv *priv = d->driver_data;
1662
1663 if (priv->status & STATUS_INDIRECT_BYTE)
1664 reg = ipw_read_reg8(priv, priv->indirect_byte);
1665 else
1666 reg = 0;
1667
1668 return sprintf(buf, "0x%02x\n", reg);
1669 }
1670 static ssize_t store_indirect_byte(struct device *d,
1671 struct device_attribute *attr,
1672 const char *buf, size_t count)
1673 {
1674 struct ipw_priv *priv = d->driver_data;
1675
1676 sscanf(buf, "%x", &priv->indirect_byte);
1677 priv->status |= STATUS_INDIRECT_BYTE;
1678 return strnlen(buf, count);
1679 }
1680
1681 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1682 show_indirect_byte, store_indirect_byte);
1683
1684 static ssize_t show_direct_dword(struct device *d,
1685 struct device_attribute *attr, char *buf)
1686 {
1687 u32 reg = 0;
1688 struct ipw_priv *priv = d->driver_data;
1689
1690 if (priv->status & STATUS_DIRECT_DWORD)
1691 reg = ipw_read32(priv, priv->direct_dword);
1692 else
1693 reg = 0;
1694
1695 return sprintf(buf, "0x%08x\n", reg);
1696 }
1697 static ssize_t store_direct_dword(struct device *d,
1698 struct device_attribute *attr,
1699 const char *buf, size_t count)
1700 {
1701 struct ipw_priv *priv = d->driver_data;
1702
1703 sscanf(buf, "%x", &priv->direct_dword);
1704 priv->status |= STATUS_DIRECT_DWORD;
1705 return strnlen(buf, count);
1706 }
1707
1708 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1709 show_direct_dword, store_direct_dword);
1710
1711 static int rf_kill_active(struct ipw_priv *priv)
1712 {
1713 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1714 priv->status |= STATUS_RF_KILL_HW;
1715 else
1716 priv->status &= ~STATUS_RF_KILL_HW;
1717
1718 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1719 }
1720
1721 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1722 char *buf)
1723 {
1724 /* 0 - RF kill not enabled
1725 1 - SW based RF kill active (sysfs)
1726 2 - HW based RF kill active
1727 3 - Both HW and SW based RF kill active */
1728 struct ipw_priv *priv = d->driver_data;
1729 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1730 (rf_kill_active(priv) ? 0x2 : 0x0);
1731 return sprintf(buf, "%i\n", val);
1732 }
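
/*
 * Illustrative sketch (not part of the original driver): decoding the
 * value exported by the rf_kill attribute above.  Bit 0 reports the
 * software kill switch (settable through sysfs) and bit 1 mirrors the
 * hardware switch, so 3 means both are active.
 */
#if 0
static void example_decode_rf_kill(int val)
{
	int sw_kill = val & 0x1;
	int hw_kill = (val & 0x2) ? 1 : 0;

	printk(KERN_DEBUG "rf_kill: sw=%d hw=%d\n", sw_kill, hw_kill);
}
#endif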
1733
1734 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1735 {
1736 if ((disable_radio ? 1 : 0) ==
1737 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1738 return 0;
1739
1740 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1741 disable_radio ? "OFF" : "ON");
1742
1743 if (disable_radio) {
1744 priv->status |= STATUS_RF_KILL_SW;
1745
1746 if (priv->workqueue)
1747 cancel_delayed_work(&priv->request_scan);
1748 queue_work(priv->workqueue, &priv->down);
1749 } else {
1750 priv->status &= ~STATUS_RF_KILL_SW;
1751 if (rf_kill_active(priv)) {
1752 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1753 "disabled by HW switch\n");
1754 /* Make sure the RF_KILL check timer is running */
1755 cancel_delayed_work(&priv->rf_kill);
1756 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1757 2 * HZ);
1758 } else
1759 queue_work(priv->workqueue, &priv->up);
1760 }
1761
1762 return 1;
1763 }
1764
1765 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1766 const char *buf, size_t count)
1767 {
1768 struct ipw_priv *priv = d->driver_data;
1769
1770 ipw_radio_kill_sw(priv, buf[0] == '1');
1771
1772 return count;
1773 }
1774
1775 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1776
1777 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1778 char *buf)
1779 {
1780 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1781 int pos = 0, len = 0;
1782 if (priv->config & CFG_SPEED_SCAN) {
1783 while (priv->speed_scan[pos] != 0)
1784 len += sprintf(&buf[len], "%d ",
1785 priv->speed_scan[pos++]);
1786 return len + sprintf(&buf[len], "\n");
1787 }
1788
1789 return sprintf(buf, "0\n");
1790 }
1791
1792 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1793 const char *buf, size_t count)
1794 {
1795 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1796 int channel, pos = 0;
1797 const char *p = buf;
1798
1799 /* list of space-separated channels to scan, optionally ending with 0 */
1800 while ((channel = simple_strtol(p, NULL, 0))) {
1801 if (pos == MAX_SPEED_SCAN - 1) {
1802 priv->speed_scan[pos] = 0;
1803 break;
1804 }
1805
1806 if (ieee80211_is_valid_channel(priv->ieee, channel))
1807 priv->speed_scan[pos++] = channel;
1808 else
1809 IPW_WARNING("Skipping invalid channel request: %d\n",
1810 channel);
1811 p = strchr(p, ' ');
1812 if (!p)
1813 break;
1814 while (*p == ' ' || *p == '\t')
1815 p++;
1816 }
1817
1818 if (pos == 0)
1819 priv->config &= ~CFG_SPEED_SCAN;
1820 else {
1821 priv->speed_scan_pos = 0;
1822 priv->config |= CFG_SPEED_SCAN;
1823 }
1824
1825 return count;
1826 }
1827
1828 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1829 store_speed_scan);
1830
1831 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1832 char *buf)
1833 {
1834 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1835 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1836 }
1837
1838 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1839 const char *buf, size_t count)
1840 {
1841 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1842 if (buf[0] == '1')
1843 priv->config |= CFG_NET_STATS;
1844 else
1845 priv->config &= ~CFG_NET_STATS;
1846
1847 return count;
1848 }
1849
1850 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1851 show_net_stats, store_net_stats);
1852
1853 static void notify_wx_assoc_event(struct ipw_priv *priv)
1854 {
1855 union iwreq_data wrqu;
1856 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1857 if (priv->status & STATUS_ASSOCIATED)
1858 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1859 else
1860 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1861 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1862 }
1863
1864 static void ipw_irq_tasklet(struct ipw_priv *priv)
1865 {
1866 u32 inta, inta_mask, handled = 0;
1867 unsigned long flags;
1868 int rc = 0;
1869
1870 spin_lock_irqsave(&priv->irq_lock, flags);
1871
1872 inta = ipw_read32(priv, IPW_INTA_RW);
1873 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1874 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1875
1876 /* Add any cached INTA values that need to be handled */
1877 inta |= priv->isr_inta;
1878
1879 spin_unlock_irqrestore(&priv->irq_lock, flags);
1880
1881 spin_lock_irqsave(&priv->lock, flags);
1882
1883 	/* handle all the causes of the interrupt */
1884 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1885 ipw_rx(priv);
1886 handled |= IPW_INTA_BIT_RX_TRANSFER;
1887 }
1888
1889 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1890 IPW_DEBUG_HC("Command completed.\n");
1891 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1892 priv->status &= ~STATUS_HCMD_ACTIVE;
1893 wake_up_interruptible(&priv->wait_command_queue);
1894 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1895 }
1896
1897 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1898 IPW_DEBUG_TX("TX_QUEUE_1\n");
1899 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1900 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1901 }
1902
1903 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1904 IPW_DEBUG_TX("TX_QUEUE_2\n");
1905 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1906 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1907 }
1908
1909 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1910 IPW_DEBUG_TX("TX_QUEUE_3\n");
1911 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1912 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1913 }
1914
1915 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1916 IPW_DEBUG_TX("TX_QUEUE_4\n");
1917 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1918 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1919 }
1920
1921 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1922 IPW_WARNING("STATUS_CHANGE\n");
1923 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1924 }
1925
1926 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1927 		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
1928 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1929 }
1930
1931 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1932 IPW_WARNING("HOST_CMD_DONE\n");
1933 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1934 }
1935
1936 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1937 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1938 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1939 }
1940
1941 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1942 IPW_WARNING("PHY_OFF_DONE\n");
1943 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1944 }
1945
1946 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1947 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1948 priv->status |= STATUS_RF_KILL_HW;
1949 wake_up_interruptible(&priv->wait_command_queue);
1950 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1951 cancel_delayed_work(&priv->request_scan);
1952 schedule_work(&priv->link_down);
1953 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1954 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1955 }
1956
1957 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1958 IPW_WARNING("Firmware error detected. Restarting.\n");
1959 if (priv->error) {
1960 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1961 #ifdef CONFIG_IPW2200_DEBUG
1962 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1963 struct ipw_fw_error *error =
1964 ipw_alloc_error_log(priv);
1965 ipw_dump_error_log(priv, error);
1966 kfree(error);
1967 }
1968 #endif
1969 } else {
1970 priv->error = ipw_alloc_error_log(priv);
1971 if (priv->error)
1972 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1973 else
1974 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1975 "log.\n");
1976 #ifdef CONFIG_IPW2200_DEBUG
1977 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1978 ipw_dump_error_log(priv, priv->error);
1979 #endif
1980 }
1981
1982 /* XXX: If hardware encryption is for WPA/WPA2,
1983 * we have to notify the supplicant. */
1984 if (priv->ieee->sec.encrypt) {
1985 priv->status &= ~STATUS_ASSOCIATED;
1986 notify_wx_assoc_event(priv);
1987 }
1988
1989 /* Keep the restart process from trying to send host
1990 * commands by clearing the INIT status bit */
1991 priv->status &= ~STATUS_INIT;
1992
1993 /* Cancel currently queued command. */
1994 priv->status &= ~STATUS_HCMD_ACTIVE;
1995 wake_up_interruptible(&priv->wait_command_queue);
1996
1997 queue_work(priv->workqueue, &priv->adapter_restart);
1998 handled |= IPW_INTA_BIT_FATAL_ERROR;
1999 }
2000
2001 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2002 IPW_ERROR("Parity error\n");
2003 handled |= IPW_INTA_BIT_PARITY_ERROR;
2004 }
2005
2006 if (handled != inta) {
2007 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2008 }
2009
2010 spin_unlock_irqrestore(&priv->lock, flags);
2011
2012 /* enable all interrupts */
2013 ipw_enable_interrupts(priv);
2014 }
2015
2016 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
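/* For illustration only: IPW_CMD(SSID) expands to
 *
 *	case IPW_CMD_SSID: return "SSID";
 *
 * so each line in the table below maps a command number to its name. */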
2017 static char *get_cmd_string(u8 cmd)
2018 {
2019 switch (cmd) {
2020 IPW_CMD(HOST_COMPLETE);
2021 IPW_CMD(POWER_DOWN);
2022 IPW_CMD(SYSTEM_CONFIG);
2023 IPW_CMD(MULTICAST_ADDRESS);
2024 IPW_CMD(SSID);
2025 IPW_CMD(ADAPTER_ADDRESS);
2026 IPW_CMD(PORT_TYPE);
2027 IPW_CMD(RTS_THRESHOLD);
2028 IPW_CMD(FRAG_THRESHOLD);
2029 IPW_CMD(POWER_MODE);
2030 IPW_CMD(WEP_KEY);
2031 IPW_CMD(TGI_TX_KEY);
2032 IPW_CMD(SCAN_REQUEST);
2033 IPW_CMD(SCAN_REQUEST_EXT);
2034 IPW_CMD(ASSOCIATE);
2035 IPW_CMD(SUPPORTED_RATES);
2036 IPW_CMD(SCAN_ABORT);
2037 IPW_CMD(TX_FLUSH);
2038 IPW_CMD(QOS_PARAMETERS);
2039 IPW_CMD(DINO_CONFIG);
2040 IPW_CMD(RSN_CAPABILITIES);
2041 IPW_CMD(RX_KEY);
2042 IPW_CMD(CARD_DISABLE);
2043 IPW_CMD(SEED_NUMBER);
2044 IPW_CMD(TX_POWER);
2045 IPW_CMD(COUNTRY_INFO);
2046 IPW_CMD(AIRONET_INFO);
2047 IPW_CMD(AP_TX_POWER);
2048 IPW_CMD(CCKM_INFO);
2049 IPW_CMD(CCX_VER_INFO);
2050 IPW_CMD(SET_CALIBRATION);
2051 IPW_CMD(SENSITIVITY_CALIB);
2052 IPW_CMD(RETRY_LIMIT);
2053 IPW_CMD(IPW_PRE_POWER_DOWN);
2054 IPW_CMD(VAP_BEACON_TEMPLATE);
2055 IPW_CMD(VAP_DTIM_PERIOD);
2056 IPW_CMD(EXT_SUPPORTED_RATES);
2057 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2058 IPW_CMD(VAP_QUIET_INTERVALS);
2059 IPW_CMD(VAP_CHANNEL_SWITCH);
2060 IPW_CMD(VAP_MANDATORY_CHANNELS);
2061 IPW_CMD(VAP_CELL_PWR_LIMIT);
2062 IPW_CMD(VAP_CF_PARAM_SET);
2063 IPW_CMD(VAP_SET_BEACONING_STATE);
2064 IPW_CMD(MEASUREMENT);
2065 IPW_CMD(POWER_CAPABILITY);
2066 IPW_CMD(SUPPORTED_CHANNELS);
2067 IPW_CMD(TPC_REPORT);
2068 IPW_CMD(WME_INFO);
2069 IPW_CMD(PRODUCTION_COMMAND);
2070 default:
2071 return "UNKNOWN";
2072 }
2073 }
2074
2075 #define HOST_COMPLETE_TIMEOUT HZ
2076
2077 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2078 {
2079 int rc = 0;
2080 unsigned long flags;
2081
2082 spin_lock_irqsave(&priv->lock, flags);
2083 if (priv->status & STATUS_HCMD_ACTIVE) {
2084 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2085 get_cmd_string(cmd->cmd));
2086 spin_unlock_irqrestore(&priv->lock, flags);
2087 return -EAGAIN;
2088 }
2089
2090 priv->status |= STATUS_HCMD_ACTIVE;
2091
2092 if (priv->cmdlog) {
2093 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2094 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2095 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2096 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2097 cmd->len);
2098 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2099 }
2100
2101 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2102 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2103 priv->status);
2104
2105 #ifndef DEBUG_CMD_WEP_KEY
2106 if (cmd->cmd == IPW_CMD_WEP_KEY)
2107 		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2108 else
2109 #endif
2110 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2111
2112 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2113 if (rc) {
2114 priv->status &= ~STATUS_HCMD_ACTIVE;
2115 IPW_ERROR("Failed to send %s: Reason %d\n",
2116 get_cmd_string(cmd->cmd), rc);
2117 spin_unlock_irqrestore(&priv->lock, flags);
2118 goto exit;
2119 }
2120 spin_unlock_irqrestore(&priv->lock, flags);
2121
2122 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2123 !(priv->
2124 status & STATUS_HCMD_ACTIVE),
2125 HOST_COMPLETE_TIMEOUT);
2126 if (rc == 0) {
2127 spin_lock_irqsave(&priv->lock, flags);
2128 if (priv->status & STATUS_HCMD_ACTIVE) {
2129 IPW_ERROR("Failed to send %s: Command timed out.\n",
2130 get_cmd_string(cmd->cmd));
2131 priv->status &= ~STATUS_HCMD_ACTIVE;
2132 spin_unlock_irqrestore(&priv->lock, flags);
2133 rc = -EIO;
2134 goto exit;
2135 }
2136 spin_unlock_irqrestore(&priv->lock, flags);
2137 } else
2138 rc = 0;
2139
2140 if (priv->status & STATUS_RF_KILL_HW) {
2141 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2142 get_cmd_string(cmd->cmd));
2143 rc = -EIO;
2144 goto exit;
2145 }
2146
2147 exit:
2148 if (priv->cmdlog) {
2149 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2150 priv->cmdlog_pos %= priv->cmdlog_len;
2151 }
2152 return rc;
2153 }
2154
2155 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2156 {
2157 struct host_cmd cmd = {
2158 .cmd = command,
2159 };
2160
2161 return __ipw_send_cmd(priv, &cmd);
2162 }
2163
2164 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2165 void *data)
2166 {
2167 struct host_cmd cmd = {
2168 .cmd = command,
2169 .len = len,
2170 .param = data,
2171 };
2172
2173 return __ipw_send_cmd(priv, &cmd);
2174 }
2175
2176 static int ipw_send_host_complete(struct ipw_priv *priv)
2177 {
2178 if (!priv) {
2179 IPW_ERROR("Invalid args\n");
2180 return -1;
2181 }
2182
2183 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2184 }
2185
2186 static int ipw_send_system_config(struct ipw_priv *priv)
2187 {
2188 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2189 sizeof(priv->sys_config),
2190 &priv->sys_config);
2191 }
2192
2193 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2194 {
2195 if (!priv || !ssid) {
2196 IPW_ERROR("Invalid args\n");
2197 return -1;
2198 }
2199
2200 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2201 ssid);
2202 }
2203
2204 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2205 {
2206 if (!priv || !mac) {
2207 IPW_ERROR("Invalid args\n");
2208 return -1;
2209 }
2210
2211 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2212 priv->net_dev->name, MAC_ARG(mac));
2213
2214 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2215 }
2216
2217 /*
2218 * NOTE: This must be executed from our workqueue as it results in udelay
2219 * being called which may corrupt the keyboard if executed on default
2220 * workqueue
2221 */
2222 static void ipw_adapter_restart(void *adapter)
2223 {
2224 struct ipw_priv *priv = adapter;
2225
2226 if (priv->status & STATUS_RF_KILL_MASK)
2227 return;
2228
2229 ipw_down(priv);
2230
2231 if (priv->assoc_network &&
2232 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2233 ipw_remove_current_network(priv);
2234
2235 if (ipw_up(priv)) {
2236 IPW_ERROR("Failed to up device\n");
2237 return;
2238 }
2239 }
2240
2241 static void ipw_bg_adapter_restart(void *data)
2242 {
2243 struct ipw_priv *priv = data;
2244 mutex_lock(&priv->mutex);
2245 ipw_adapter_restart(data);
2246 mutex_unlock(&priv->mutex);
2247 }
2248
2249 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2250
2251 static void ipw_scan_check(void *data)
2252 {
2253 struct ipw_priv *priv = data;
2254 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2255 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2256 "adapter after (%dms).\n",
2257 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2258 queue_work(priv->workqueue, &priv->adapter_restart);
2259 }
2260 }
2261
2262 static void ipw_bg_scan_check(void *data)
2263 {
2264 struct ipw_priv *priv = data;
2265 mutex_lock(&priv->mutex);
2266 ipw_scan_check(data);
2267 mutex_unlock(&priv->mutex);
2268 }
2269
2270 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2271 struct ipw_scan_request_ext *request)
2272 {
2273 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2274 sizeof(*request), request);
2275 }
2276
2277 static int ipw_send_scan_abort(struct ipw_priv *priv)
2278 {
2279 if (!priv) {
2280 IPW_ERROR("Invalid args\n");
2281 return -1;
2282 }
2283
2284 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2285 }
2286
2287 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2288 {
2289 struct ipw_sensitivity_calib calib = {
2290 .beacon_rssi_raw = sens,
2291 };
2292
2293 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2294 &calib);
2295 }
2296
2297 static int ipw_send_associate(struct ipw_priv *priv,
2298 struct ipw_associate *associate)
2299 {
2300 struct ipw_associate tmp_associate;
2301
2302 if (!priv || !associate) {
2303 IPW_ERROR("Invalid args\n");
2304 return -1;
2305 }
2306
2307 memcpy(&tmp_associate, associate, sizeof(*associate));
2308 tmp_associate.policy_support =
2309 cpu_to_le16(tmp_associate.policy_support);
2310 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2311 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2312 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2313 tmp_associate.listen_interval =
2314 cpu_to_le16(tmp_associate.listen_interval);
2315 tmp_associate.beacon_interval =
2316 cpu_to_le16(tmp_associate.beacon_interval);
2317 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2318
2319 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2320 &tmp_associate);
2321 }
2322
2323 static int ipw_send_supported_rates(struct ipw_priv *priv,
2324 struct ipw_supported_rates *rates)
2325 {
2326 if (!priv || !rates) {
2327 IPW_ERROR("Invalid args\n");
2328 return -1;
2329 }
2330
2331 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2332 rates);
2333 }
2334
2335 static int ipw_set_random_seed(struct ipw_priv *priv)
2336 {
2337 u32 val;
2338
2339 if (!priv) {
2340 IPW_ERROR("Invalid args\n");
2341 return -1;
2342 }
2343
2344 get_random_bytes(&val, sizeof(val));
2345
2346 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2347 }
2348
2349 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2350 {
2351 if (!priv) {
2352 IPW_ERROR("Invalid args\n");
2353 return -1;
2354 }
2355
2356 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2357 &phy_off);
2358 }
2359
2360 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2361 {
2362 if (!priv || !power) {
2363 IPW_ERROR("Invalid args\n");
2364 return -1;
2365 }
2366
2367 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2368 }
2369
2370 static int ipw_set_tx_power(struct ipw_priv *priv)
2371 {
2372 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2373 struct ipw_tx_power tx_power;
2374 s8 max_power;
2375 int i;
2376
2377 memset(&tx_power, 0, sizeof(tx_power));
2378
2379 /* configure device for 'G' band */
2380 tx_power.ieee_mode = IPW_G_MODE;
2381 tx_power.num_channels = geo->bg_channels;
2382 for (i = 0; i < geo->bg_channels; i++) {
2383 max_power = geo->bg[i].max_power;
2384 tx_power.channels_tx_power[i].channel_number =
2385 geo->bg[i].channel;
2386 tx_power.channels_tx_power[i].tx_power = max_power ?
2387 min(max_power, priv->tx_power) : priv->tx_power;
2388 }
2389 if (ipw_send_tx_power(priv, &tx_power))
2390 return -EIO;
2391
2392 /* configure device to also handle 'B' band */
2393 tx_power.ieee_mode = IPW_B_MODE;
2394 if (ipw_send_tx_power(priv, &tx_power))
2395 return -EIO;
2396
2397 /* configure device to also handle 'A' band */
2398 if (priv->ieee->abg_true) {
2399 tx_power.ieee_mode = IPW_A_MODE;
2400 tx_power.num_channels = geo->a_channels;
2401 for (i = 0; i < tx_power.num_channels; i++) {
2402 max_power = geo->a[i].max_power;
2403 tx_power.channels_tx_power[i].channel_number =
2404 geo->a[i].channel;
2405 tx_power.channels_tx_power[i].tx_power = max_power ?
2406 min(max_power, priv->tx_power) : priv->tx_power;
2407 }
2408 if (ipw_send_tx_power(priv, &tx_power))
2409 return -EIO;
2410 }
2411 return 0;
2412 }
2413
2414 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2415 {
2416 struct ipw_rts_threshold rts_threshold = {
2417 .rts_threshold = rts,
2418 };
2419
2420 if (!priv) {
2421 IPW_ERROR("Invalid args\n");
2422 return -1;
2423 }
2424
2425 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2426 sizeof(rts_threshold), &rts_threshold);
2427 }
2428
2429 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2430 {
2431 struct ipw_frag_threshold frag_threshold = {
2432 .frag_threshold = frag,
2433 };
2434
2435 if (!priv) {
2436 IPW_ERROR("Invalid args\n");
2437 return -1;
2438 }
2439
2440 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2441 sizeof(frag_threshold), &frag_threshold);
2442 }
2443
2444 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2445 {
2446 u32 param;
2447
2448 if (!priv) {
2449 IPW_ERROR("Invalid args\n");
2450 return -1;
2451 }
2452
2453 	/* If on battery, set the power index to 3; if on AC, set to CAM;
2454 	 * otherwise pass the user-supplied level through */
2455 switch (mode) {
2456 case IPW_POWER_BATTERY:
2457 param = IPW_POWER_INDEX_3;
2458 break;
2459 case IPW_POWER_AC:
2460 param = IPW_POWER_MODE_CAM;
2461 break;
2462 default:
2463 param = mode;
2464 break;
2465 }
2466
2467 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2468 &param);
2469 }
2470
2471 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2472 {
2473 struct ipw_retry_limit retry_limit = {
2474 .short_retry_limit = slimit,
2475 .long_retry_limit = llimit
2476 };
2477
2478 if (!priv) {
2479 IPW_ERROR("Invalid args\n");
2480 return -1;
2481 }
2482
2483 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2484 &retry_limit);
2485 }
2486
2487 /*
2488 * The IPW device contains a Microwire compatible EEPROM that stores
2489 * various data like the MAC address. Usually the firmware has exclusive
2490 * access to the eeprom, but during device initialization (before the
2491 * device driver has sent the HostComplete command to the firmware) the
2492 * device driver has read access to the EEPROM by way of indirect addressing
2493 * through a couple of memory mapped registers.
2494 *
2495  * The following is a simplified implementation for pulling data out of
2496  * the eeprom, along with some helper functions to find information in
2497  * the per device private data's copy of the eeprom.
2498  *
2499  * NOTE: To better understand how these functions work (i.e. what is a chip
2500  * select and why do we have to keep driving the eeprom clock?), read
2501  * just about any data sheet for a Microwire compatible EEPROM.
2502 */
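/*
 * For orientation only -- this is a summary of what the helpers below
 * bit-bang, not a quote from any particular datasheet: a Microwire READ
 * transaction looks like
 *
 *	assert CS -> start bit (1) -> 2 opcode bits -> 8 address bits
 *	          -> dummy clock -> 16 data bits sampled on DO -> deassert CS
 *
 * with every bit clocked by toggling EEPROM_BIT_SK while EEPROM_BIT_CS is
 * held, which is the sequence eeprom_op() and eeprom_read_u16() drive.
 */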
2503
2504 /* write a 32 bit value into the indirect accessor register */
2505 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2506 {
2507 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2508
2509 /* the eeprom requires some time to complete the operation */
2510 udelay(p->eeprom_delay);
2511
2512 return;
2513 }
2514
2515 /* perform a chip select operation */
2516 static void eeprom_cs(struct ipw_priv *priv)
2517 {
2518 eeprom_write_reg(priv, 0);
2519 eeprom_write_reg(priv, EEPROM_BIT_CS);
2520 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2521 eeprom_write_reg(priv, EEPROM_BIT_CS);
2522 }
2523
2524 /* deassert the chip select line */
2525 static void eeprom_disable_cs(struct ipw_priv *priv)
2526 {
2527 eeprom_write_reg(priv, EEPROM_BIT_CS);
2528 eeprom_write_reg(priv, 0);
2529 eeprom_write_reg(priv, EEPROM_BIT_SK);
2530 }
2531
2532 /* push a single bit down to the eeprom */
2533 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2534 {
2535 int d = (bit ? EEPROM_BIT_DI : 0);
2536 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2537 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2538 }
2539
2540 /* push an opcode followed by an address down to the eeprom */
2541 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2542 {
2543 int i;
2544
2545 eeprom_cs(priv);
2546 eeprom_write_bit(priv, 1);
2547 eeprom_write_bit(priv, op & 2);
2548 eeprom_write_bit(priv, op & 1);
2549 for (i = 7; i >= 0; i--) {
2550 eeprom_write_bit(priv, addr & (1 << i));
2551 }
2552 }
2553
2554 /* pull 16 bits off the eeprom, one bit at a time */
2555 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2556 {
2557 int i;
2558 u16 r = 0;
2559
2560 /* Send READ Opcode */
2561 eeprom_op(priv, EEPROM_CMD_READ, addr);
2562
2563 /* Send dummy bit */
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565
2566 /* Read the byte off the eeprom one bit at a time */
2567 for (i = 0; i < 16; i++) {
2568 u32 data = 0;
2569 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2570 eeprom_write_reg(priv, EEPROM_BIT_CS);
2571 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2572 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2573 }
2574
2575 /* Send another dummy bit */
2576 eeprom_write_reg(priv, 0);
2577 eeprom_disable_cs(priv);
2578
2579 return r;
2580 }
2581
2582 /* helper function for pulling the mac address out of the private */
2583 /* data's copy of the eeprom data */
2584 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2585 {
2586 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2587 }
2588
2589 /*
2590 * Either the device driver (i.e. the host) or the firmware can
2591 * load eeprom data into the designated region in SRAM. If neither
2592 * happens then the FW will shutdown with a fatal error.
2593 *
2594  * In order to signal the FW to load the EEPROM itself, the
2595  * EEPROM_LOAD_DISABLE region of shared SRAM needs to be non-zero.
2596 */
2597 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2598 {
2599 int i;
2600 u16 *eeprom = (u16 *) priv->eeprom;
2601
2602 IPW_DEBUG_TRACE(">>\n");
2603
2604 /* read entire contents of eeprom into private buffer */
2605 for (i = 0; i < 128; i++)
2606 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2607
2608 /*
2609 If the data looks correct, then copy it to our private
2610 copy. Otherwise let the firmware know to perform the operation
2611 on its own.
2612 */
2613 if (priv->eeprom[EEPROM_VERSION] != 0) {
2614 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2615
2616 /* write the eeprom data to sram */
2617 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2618 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2619
2620 /* Do not load eeprom data on fatal error or suspend */
2621 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2622 } else {
2623 		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2624
2625 /* Load eeprom data on fatal error or suspend */
2626 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2627 }
2628
2629 IPW_DEBUG_TRACE("<<\n");
2630 }
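/*
 * Illustrative sketch only (not called anywhere in the driver): once
 * ipw_eeprom_init_sram() has filled priv->eeprom, consumers read the cached
 * copy instead of bit-banging the part again, e.g.:
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (priv->eeprom[EEPROM_VERSION] == 0)
 *		IPW_DEBUG_INFO("Blank EEPROM image, FW will load SRAM\n");
 *	eeprom_parse_mac(priv, mac);
 *	IPW_DEBUG_INFO("EEPROM MAC: " MAC_FMT "\n", MAC_ARG(mac));
 */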
2631
2632 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2633 {
2634 count >>= 2;
2635 if (!count)
2636 return;
2637 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2638 while (count--)
2639 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2640 }
2641
2642 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2643 {
2644 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2645 CB_NUMBER_OF_ELEMENTS_SMALL *
2646 sizeof(struct command_block));
2647 }
2648
2649 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2650 { /* start dma engine but no transfers yet */
2651
2652 IPW_DEBUG_FW(">> : \n");
2653
2654 /* Start the dma */
2655 ipw_fw_dma_reset_command_blocks(priv);
2656
2657 /* Write CB base address */
2658 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2659
2660 IPW_DEBUG_FW("<< : \n");
2661 return 0;
2662 }
2663
2664 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2665 {
2666 u32 control = 0;
2667
2668 IPW_DEBUG_FW(">> :\n");
2669
2670 	/* set the Stop and Abort bit */
2671 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2672 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2673 priv->sram_desc.last_cb_index = 0;
2674
2675 IPW_DEBUG_FW("<< \n");
2676 }
2677
2678 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2679 struct command_block *cb)
2680 {
2681 u32 address =
2682 IPW_SHARED_SRAM_DMA_CONTROL +
2683 (sizeof(struct command_block) * index);
2684 IPW_DEBUG_FW(">> :\n");
2685
2686 ipw_write_indirect(priv, address, (u8 *) cb,
2687 (int)sizeof(struct command_block));
2688
2689 IPW_DEBUG_FW("<< :\n");
2690 return 0;
2691
2692 }
2693
2694 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2695 {
2696 u32 control = 0;
2697 u32 index = 0;
2698
2699 IPW_DEBUG_FW(">> :\n");
2700
2701 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2702 ipw_fw_dma_write_command_block(priv, index,
2703 &priv->sram_desc.cb_list[index]);
2704
2705 /* Enable the DMA in the CSR register */
2706 ipw_clear_bit(priv, IPW_RESET_REG,
2707 IPW_RESET_REG_MASTER_DISABLED |
2708 IPW_RESET_REG_STOP_MASTER);
2709
2710 /* Set the Start bit. */
2711 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2712 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2713
2714 IPW_DEBUG_FW("<< :\n");
2715 return 0;
2716 }
2717
2718 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2719 {
2720 u32 address;
2721 u32 register_value = 0;
2722 u32 cb_fields_address = 0;
2723
2724 IPW_DEBUG_FW(">> :\n");
2725 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2726 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2727
2728 	/* Read the DMA Control register */
2729 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2730 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2731
2732 /* Print the CB values */
2733 cb_fields_address = address;
2734 register_value = ipw_read_reg32(priv, cb_fields_address);
2735 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2736
2737 cb_fields_address += sizeof(u32);
2738 register_value = ipw_read_reg32(priv, cb_fields_address);
2739 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2740
2741 cb_fields_address += sizeof(u32);
2742 register_value = ipw_read_reg32(priv, cb_fields_address);
2743 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2744 register_value);
2745
2746 cb_fields_address += sizeof(u32);
2747 register_value = ipw_read_reg32(priv, cb_fields_address);
2748 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2749
2750 	IPW_DEBUG_FW("<< :\n");
2751 }
2752
2753 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2754 {
2755 u32 current_cb_address = 0;
2756 u32 current_cb_index = 0;
2757
2758 	IPW_DEBUG_FW(">> :\n");
2759 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2760
2761 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2762 sizeof(struct command_block);
2763
2764 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2765 current_cb_index, current_cb_address);
2766
2767 	IPW_DEBUG_FW("<< :\n");
2768 return current_cb_index;
2769
2770 }
2771
2772 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2773 u32 src_address,
2774 u32 dest_address,
2775 u32 length,
2776 int interrupt_enabled, int is_last)
2777 {
2778
2779 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2780 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2781 CB_DEST_SIZE_LONG;
2782 struct command_block *cb;
2783 u32 last_cb_element = 0;
2784
2785 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2786 src_address, dest_address, length);
2787
2788 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2789 return -1;
2790
2791 last_cb_element = priv->sram_desc.last_cb_index;
2792 cb = &priv->sram_desc.cb_list[last_cb_element];
2793 priv->sram_desc.last_cb_index++;
2794
2795 /* Calculate the new CB control word */
2796 if (interrupt_enabled)
2797 control |= CB_INT_ENABLED;
2798
2799 if (is_last)
2800 control |= CB_LAST_VALID;
2801
2802 control |= length;
2803
2804 /* Calculate the CB Element's checksum value */
2805 cb->status = control ^ src_address ^ dest_address;
2806
2807 /* Copy the Source and Destination addresses */
2808 cb->dest_addr = dest_address;
2809 cb->source_addr = src_address;
2810
2811 /* Copy the Control Word last */
2812 cb->control = control;
2813
2814 return 0;
2815 }
2816
2817 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2818 u32 src_phys, u32 dest_address, u32 length)
2819 {
2820 u32 bytes_left = length;
2821 u32 src_offset = 0;
2822 u32 dest_offset = 0;
2823 int status = 0;
2824 IPW_DEBUG_FW(">> \n");
2825 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2826 src_phys, dest_address, length);
2827 while (bytes_left > CB_MAX_LENGTH) {
2828 status = ipw_fw_dma_add_command_block(priv,
2829 src_phys + src_offset,
2830 dest_address +
2831 dest_offset,
2832 CB_MAX_LENGTH, 0, 0);
2833 if (status) {
2834 IPW_DEBUG_FW_INFO(": Failed\n");
2835 return -1;
2836 } else
2837 IPW_DEBUG_FW_INFO(": Added new cb\n");
2838
2839 src_offset += CB_MAX_LENGTH;
2840 dest_offset += CB_MAX_LENGTH;
2841 bytes_left -= CB_MAX_LENGTH;
2842 }
2843
2844 /* add the buffer tail */
2845 if (bytes_left > 0) {
2846 status =
2847 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2848 dest_address + dest_offset,
2849 bytes_left, 0, 0);
2850 if (status) {
2851 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2852 return -1;
2853 } else
2854 IPW_DEBUG_FW_INFO
2855 (": Adding new cb - the buffer tail\n");
2856 }
2857
2858 IPW_DEBUG_FW("<< \n");
2859 return 0;
2860 }
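/*
 * Worked example (illustrative): for length == 2 * CB_MAX_LENGTH + 100 the
 * loop above queues two full-sized command blocks at offsets 0 and
 * CB_MAX_LENGTH, and the tail path queues a third block of 100 bytes at
 * offset 2 * CB_MAX_LENGTH -- three CBs total, all staged before
 * ipw_fw_dma_kick() starts the transfer.
 */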
2861
2862 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2863 {
2864 u32 current_index = 0, previous_index;
2865 u32 watchdog = 0;
2866
2867 IPW_DEBUG_FW(">> : \n");
2868
2869 current_index = ipw_fw_dma_command_block_index(priv);
2870 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2871 (int)priv->sram_desc.last_cb_index);
2872
2873 while (current_index < priv->sram_desc.last_cb_index) {
2874 udelay(50);
2875 previous_index = current_index;
2876 current_index = ipw_fw_dma_command_block_index(priv);
2877
2878 if (previous_index < current_index) {
2879 watchdog = 0;
2880 continue;
2881 }
2882 if (++watchdog > 400) {
2883 IPW_DEBUG_FW_INFO("Timeout\n");
2884 ipw_fw_dma_dump_command_block(priv);
2885 ipw_fw_dma_abort(priv);
2886 return -1;
2887 }
2888 }
2889
2890 ipw_fw_dma_abort(priv);
2891
2892 	/* Disable the DMA in the CSR register */
2893 ipw_set_bit(priv, IPW_RESET_REG,
2894 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2895
2896 IPW_DEBUG_FW("<< dmaWaitSync \n");
2897 return 0;
2898 }
2899
2900 static void ipw_remove_current_network(struct ipw_priv *priv)
2901 {
2902 struct list_head *element, *safe;
2903 struct ieee80211_network *network = NULL;
2904 unsigned long flags;
2905
2906 spin_lock_irqsave(&priv->ieee->lock, flags);
2907 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2908 network = list_entry(element, struct ieee80211_network, list);
2909 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2910 list_del(element);
2911 list_add_tail(&network->list,
2912 &priv->ieee->network_free_list);
2913 }
2914 }
2915 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2916 }
2917
2918 /**
2919 * Check that card is still alive.
2920 * Reads debug register from domain0.
2921 * If card is present, pre-defined value should
2922 * be found there.
2923 *
2924 * @param priv
2925 * @return 1 if card is present, 0 otherwise
2926 */
2927 static inline int ipw_alive(struct ipw_priv *priv)
2928 {
2929 return ipw_read32(priv, 0x90) == 0xd55555d5;
2930 }
2931
2932 /* timeout in msec, attempted in 10-msec quanta */
2933 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2934 int timeout)
2935 {
2936 int i = 0;
2937
2938 do {
2939 if ((ipw_read32(priv, addr) & mask) == mask)
2940 return i;
2941 mdelay(10);
2942 i += 10;
2943 } while (i < timeout);
2944
2945 return -ETIME;
2946 }
2947
2948 /* These functions load the firmware and micro code for the operation of
2949 * the ipw hardware. It assumes the buffer has all the bits for the
2950 * image and the caller is handling the memory allocation and clean up.
2951 */
2952
2953 static int ipw_stop_master(struct ipw_priv *priv)
2954 {
2955 int rc;
2956
2957 IPW_DEBUG_TRACE(">> \n");
2958 /* stop master. typical delay - 0 */
2959 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2960
2961 /* timeout is in msec, polled in 10-msec quanta */
2962 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2963 IPW_RESET_REG_MASTER_DISABLED, 100);
2964 if (rc < 0) {
2965 IPW_ERROR("wait for stop master failed after 100ms\n");
2966 return -1;
2967 }
2968
2969 IPW_DEBUG_INFO("stop master %dms\n", rc);
2970
2971 return rc;
2972 }
2973
2974 static void ipw_arc_release(struct ipw_priv *priv)
2975 {
2976 IPW_DEBUG_TRACE(">> \n");
2977 mdelay(5);
2978
2979 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2980
2981 /* no one knows timing, for safety add some delay */
2982 mdelay(5);
2983 }
2984
2985 struct fw_chunk {
2986 u32 address;
2987 u32 length;
2988 };
2989
2990 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2991 {
2992 int rc = 0, i, addr;
2993 u8 cr = 0;
2994 u16 *image;
2995
2996 image = (u16 *) data;
2997
2998 IPW_DEBUG_TRACE(">> \n");
2999
3000 rc = ipw_stop_master(priv);
3001
3002 if (rc < 0)
3003 return rc;
3004
3005 // spin_lock_irqsave(&priv->lock, flags);
3006
3007 for (addr = IPW_SHARED_LOWER_BOUND;
3008 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3009 ipw_write32(priv, addr, 0);
3010 }
3011
3012 /* no ucode (yet) */
3013 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3014 /* destroy DMA queues */
3015 /* reset sequence */
3016
3017 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3018 ipw_arc_release(priv);
3019 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3020 mdelay(1);
3021
3022 /* reset PHY */
3023 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3024 mdelay(1);
3025
3026 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3027 mdelay(1);
3028
3029 /* enable ucode store */
3030 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3031 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3032 mdelay(1);
3033
3034 /* write ucode */
3035 /**
3036 * @bug
3037 * Do NOT set indirect address register once and then
3038 * store data to indirect data register in the loop.
3039 	 * It seems very reasonable, but in this case DINO does not
3040 	 * accept the ucode. It is essential to set the address each time.
3041 */
3042 /* load new ipw uCode */
3043 for (i = 0; i < len / 2; i++)
3044 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3045 cpu_to_le16(image[i]));
3046
3047 /* enable DINO */
3048 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3049 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3050
3051 	/* this is where the igx / win driver deviates from the VAP driver. */
3052
3053 /* wait for alive response */
3054 for (i = 0; i < 100; i++) {
3055 /* poll for incoming data */
3056 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3057 if (cr & DINO_RXFIFO_DATA)
3058 break;
3059 mdelay(1);
3060 }
3061
3062 if (cr & DINO_RXFIFO_DATA) {
3063 		/* alive_command_response size is NOT a multiple of 4 */
3064 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3065
3066 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3067 response_buffer[i] =
3068 le32_to_cpu(ipw_read_reg32(priv,
3069 IPW_BASEBAND_RX_FIFO_READ));
3070 memcpy(&priv->dino_alive, response_buffer,
3071 sizeof(priv->dino_alive));
3072 if (priv->dino_alive.alive_command == 1
3073 && priv->dino_alive.ucode_valid == 1) {
3074 rc = 0;
3075 IPW_DEBUG_INFO
3076 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3077 "of %02d/%02d/%02d %02d:%02d\n",
3078 priv->dino_alive.software_revision,
3079 priv->dino_alive.software_revision,
3080 priv->dino_alive.device_identifier,
3081 priv->dino_alive.device_identifier,
3082 priv->dino_alive.time_stamp[0],
3083 priv->dino_alive.time_stamp[1],
3084 priv->dino_alive.time_stamp[2],
3085 priv->dino_alive.time_stamp[3],
3086 priv->dino_alive.time_stamp[4]);
3087 } else {
3088 IPW_DEBUG_INFO("Microcode is not alive\n");
3089 rc = -EINVAL;
3090 }
3091 } else {
3092 IPW_DEBUG_INFO("No alive response from DINO\n");
3093 rc = -ETIME;
3094 }
3095
3096 	/* disable DINO, otherwise for some reason
3097 	   the firmware has problems getting the alive response. */
3098 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3099
3100 // spin_unlock_irqrestore(&priv->lock, flags);
3101
3102 return rc;
3103 }
3104
3105 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3106 {
3107 int rc = -1;
3108 int offset = 0;
3109 struct fw_chunk *chunk;
3110 dma_addr_t shared_phys;
3111 u8 *shared_virt;
3112
3113 	IPW_DEBUG_TRACE(">> : \n");
3114 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3115
3116 if (!shared_virt)
3117 return -ENOMEM;
3118
3119 memmove(shared_virt, data, len);
3120
3121 /* Start the Dma */
3122 rc = ipw_fw_dma_enable(priv);
3123
3124 if (priv->sram_desc.last_cb_index > 0) {
3125 		/* the DMA engine already has command blocks queued; this would be a bug. */
3126 BUG();
3127 goto out;
3128 }
3129
3130 do {
3131 chunk = (struct fw_chunk *)(data + offset);
3132 offset += sizeof(struct fw_chunk);
3133 /* build DMA packet and queue up for sending */
3134 		/* dma to chunk->address, the chunk->length bytes from data +
3135 		 * offset */
3136 /* Dma loading */
3137 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3138 le32_to_cpu(chunk->address),
3139 le32_to_cpu(chunk->length));
3140 if (rc) {
3141 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3142 goto out;
3143 }
3144
3145 offset += le32_to_cpu(chunk->length);
3146 } while (offset < len);
3147
3148 /* Run the DMA and wait for the answer */
3149 rc = ipw_fw_dma_kick(priv);
3150 if (rc) {
3151 IPW_ERROR("dmaKick Failed\n");
3152 goto out;
3153 }
3154
3155 rc = ipw_fw_dma_wait(priv);
3156 if (rc) {
3157 IPW_ERROR("dmaWaitSync Failed\n");
3158 goto out;
3159 }
3160 out:
3161 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3162 return rc;
3163 }
3164
3165 /* stop nic */
3166 static int ipw_stop_nic(struct ipw_priv *priv)
3167 {
3168 int rc = 0;
3169
3170 /* stop */
3171 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3172
3173 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3174 IPW_RESET_REG_MASTER_DISABLED, 500);
3175 if (rc < 0) {
3176 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3177 return rc;
3178 }
3179
3180 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3181
3182 return rc;
3183 }
3184
3185 static void ipw_start_nic(struct ipw_priv *priv)
3186 {
3187 IPW_DEBUG_TRACE(">>\n");
3188
3189 /* prvHwStartNic release ARC */
3190 ipw_clear_bit(priv, IPW_RESET_REG,
3191 IPW_RESET_REG_MASTER_DISABLED |
3192 IPW_RESET_REG_STOP_MASTER |
3193 CBD_RESET_REG_PRINCETON_RESET);
3194
3195 /* enable power management */
3196 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3197 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3198
3199 IPW_DEBUG_TRACE("<<\n");
3200 }
3201
3202 static int ipw_init_nic(struct ipw_priv *priv)
3203 {
3204 int rc;
3205
3206 IPW_DEBUG_TRACE(">>\n");
3207 /* reset */
3208 /*prvHwInitNic */
3209 /* set "initialization complete" bit to move adapter to D0 state */
3210 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3211
3212 /* low-level PLL activation */
3213 ipw_write32(priv, IPW_READ_INT_REGISTER,
3214 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3215
3216 /* wait for clock stabilization */
3217 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3218 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3219 if (rc < 0)
3220 		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3221
3222 /* assert SW reset */
3223 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3224
3225 udelay(10);
3226
3227 /* set "initialization complete" bit to move adapter to D0 state */
3228 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3229
3230 	IPW_DEBUG_TRACE("<<\n");
3231 return 0;
3232 }
3233
3234 /* Call this function from process context, it will sleep in request_firmware.
3235 * Probe is an ok place to call this from.
3236 */
3237 static int ipw_reset_nic(struct ipw_priv *priv)
3238 {
3239 int rc = 0;
3240 unsigned long flags;
3241
3242 IPW_DEBUG_TRACE(">>\n");
3243
3244 rc = ipw_init_nic(priv);
3245
3246 spin_lock_irqsave(&priv->lock, flags);
3247 /* Clear the 'host command active' bit... */
3248 priv->status &= ~STATUS_HCMD_ACTIVE;
3249 wake_up_interruptible(&priv->wait_command_queue);
3250 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3251 wake_up_interruptible(&priv->wait_state);
3252 spin_unlock_irqrestore(&priv->lock, flags);
3253
3254 IPW_DEBUG_TRACE("<<\n");
3255 return rc;
3256 }
3257
3258
3259 struct ipw_fw {
3260 __le32 ver;
3261 __le32 boot_size;
3262 __le32 ucode_size;
3263 __le32 fw_size;
3264 u8 data[0];
3265 };
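/*
 * Rough sketch of the combined firmware image layout as it is parsed below
 * (informational only, derived from ipw_get_fw()/ipw_load(), not a formal
 * file-format specification):
 *
 *	+-----------------+-----------+------------+---------+
 *	| header (16 B)   | boot img  | ucode img  | fw img  |
 *	| ver + 3 sizes   | boot_size | ucode_size | fw_size |
 *	+-----------------+-----------+------------+---------+
 *
 * so boot_img = &fw->data[0], ucode_img = &fw->data[boot_size] and
 * fw_img = &fw->data[boot_size + ucode_size].
 */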
3266
3267 static int ipw_get_fw(struct ipw_priv *priv,
3268 const struct firmware **raw, const char *name)
3269 {
3270 struct ipw_fw *fw;
3271 int rc;
3272
3273 /* ask firmware_class module to get the boot firmware off disk */
3274 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3275 if (rc < 0) {
3276 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3277 return rc;
3278 }
3279
3280 if ((*raw)->size < sizeof(*fw)) {
3281 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3282 return -EINVAL;
3283 }
3284
3285 fw = (void *)(*raw)->data;
3286
3287 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3288 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3289 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3290 name, (*raw)->size);
3291 return -EINVAL;
3292 }
3293
3294 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3295 name,
3296 le32_to_cpu(fw->ver) >> 16,
3297 le32_to_cpu(fw->ver) & 0xff,
3298 (*raw)->size - sizeof(*fw));
3299 return 0;
3300 }
3301
3302 #define IPW_RX_BUF_SIZE (3000)
3303
3304 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3305 struct ipw_rx_queue *rxq)
3306 {
3307 unsigned long flags;
3308 int i;
3309
3310 spin_lock_irqsave(&rxq->lock, flags);
3311
3312 INIT_LIST_HEAD(&rxq->rx_free);
3313 INIT_LIST_HEAD(&rxq->rx_used);
3314
3315 /* Fill the rx_used queue with _all_ of the Rx buffers */
3316 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3317 /* In the reset function, these buffers may have been allocated
3318 * to an SKB, so we need to unmap and free potential storage */
3319 if (rxq->pool[i].skb != NULL) {
3320 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3321 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3322 dev_kfree_skb(rxq->pool[i].skb);
3323 rxq->pool[i].skb = NULL;
3324 }
3325 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3326 }
3327
3328 /* Set us so that we have processed and used all buffers, but have
3329 * not restocked the Rx queue with fresh buffers */
3330 rxq->read = rxq->write = 0;
3331 rxq->processed = RX_QUEUE_SIZE - 1;
3332 rxq->free_count = 0;
3333 spin_unlock_irqrestore(&rxq->lock, flags);
3334 }
3335
3336 #ifdef CONFIG_PM
3337 static int fw_loaded = 0;
3338 static const struct firmware *raw = NULL;
3339
3340 static void free_firmware(void)
3341 {
3342 if (fw_loaded) {
3343 release_firmware(raw);
3344 raw = NULL;
3345 fw_loaded = 0;
3346 }
3347 }
3348 #else
3349 #define free_firmware() do {} while (0)
3350 #endif
3351
3352 static int ipw_load(struct ipw_priv *priv)
3353 {
3354 #ifndef CONFIG_PM
3355 const struct firmware *raw = NULL;
3356 #endif
3357 struct ipw_fw *fw;
3358 u8 *boot_img, *ucode_img, *fw_img;
3359 u8 *name = NULL;
3360 int rc = 0, retries = 3;
3361
3362 switch (priv->ieee->iw_mode) {
3363 case IW_MODE_ADHOC:
3364 name = "ipw2200-ibss.fw";
3365 break;
3366 #ifdef CONFIG_IPW2200_MONITOR
3367 case IW_MODE_MONITOR:
3368 name = "ipw2200-sniffer.fw";
3369 break;
3370 #endif
3371 case IW_MODE_INFRA:
3372 name = "ipw2200-bss.fw";
3373 break;
3374 }
3375
3376 if (!name) {
3377 rc = -EINVAL;
3378 goto error;
3379 }
3380
3381 #ifdef CONFIG_PM
3382 if (!fw_loaded) {
3383 #endif
3384 rc = ipw_get_fw(priv, &raw, name);
3385 if (rc < 0)
3386 goto error;
3387 #ifdef CONFIG_PM
3388 }
3389 #endif
3390
3391 fw = (void *)raw->data;
3392 boot_img = &fw->data[0];
3393 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3394 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3395 le32_to_cpu(fw->ucode_size)];
3396
3397 if (rc < 0)
3398 goto error;
3399
3400 if (!priv->rxq)
3401 priv->rxq = ipw_rx_queue_alloc(priv);
3402 else
3403 ipw_rx_queue_reset(priv, priv->rxq);
3404 if (!priv->rxq) {
3405 IPW_ERROR("Unable to initialize Rx queue\n");
3406 goto error;
3407 }
3408
3409 retry:
3410 /* Ensure interrupts are disabled */
3411 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3412 priv->status &= ~STATUS_INT_ENABLED;
3413
3414 /* ack pending interrupts */
3415 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3416
3417 ipw_stop_nic(priv);
3418
3419 rc = ipw_reset_nic(priv);
3420 if (rc < 0) {
3421 IPW_ERROR("Unable to reset NIC\n");
3422 goto error;
3423 }
3424
3425 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3426 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3427
3428 /* DMA the initial boot firmware into the device */
3429 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3430 if (rc < 0) {
3431 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3432 goto error;
3433 }
3434
3435 /* kick start the device */
3436 ipw_start_nic(priv);
3437
3438 /* wait for the device to finish its initial startup sequence */
3439 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3440 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3441 if (rc < 0) {
3442 IPW_ERROR("device failed to boot initial fw image\n");
3443 goto error;
3444 }
3445 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3446
3447 /* ack fw init done interrupt */
3448 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3449
3450 /* DMA the ucode into the device */
3451 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3452 if (rc < 0) {
3453 IPW_ERROR("Unable to load ucode: %d\n", rc);
3454 goto error;
3455 }
3456
3457 /* stop nic */
3458 ipw_stop_nic(priv);
3459
3460 /* DMA bss firmware into the device */
3461 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3462 if (rc < 0) {
3463 IPW_ERROR("Unable to load firmware: %d\n", rc);
3464 goto error;
3465 }
3466 #ifdef CONFIG_PM
3467 fw_loaded = 1;
3468 #endif
3469
3470 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3471
3472 rc = ipw_queue_reset(priv);
3473 if (rc < 0) {
3474 IPW_ERROR("Unable to initialize queues\n");
3475 goto error;
3476 }
3477
3478 /* Ensure interrupts are disabled */
3479 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3480 /* ack pending interrupts */
3481 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3482
3483 /* kick start the device */
3484 ipw_start_nic(priv);
3485
3486 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3487 if (retries > 0) {
3488 IPW_WARNING("Parity error. Retrying init.\n");
3489 retries--;
3490 goto retry;
3491 }
3492
3493 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3494 rc = -EIO;
3495 goto error;
3496 }
3497
3498 /* wait for the device */
3499 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3500 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3501 if (rc < 0) {
3502 IPW_ERROR("device failed to start within 500ms\n");
3503 goto error;
3504 }
3505 IPW_DEBUG_INFO("device response after %dms\n", rc);
3506
3507 /* ack fw init done interrupt */
3508 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3509
3510 /* read eeprom data and initialize the eeprom region of sram */
3511 priv->eeprom_delay = 1;
3512 ipw_eeprom_init_sram(priv);
3513
3514 /* enable interrupts */
3515 ipw_enable_interrupts(priv);
3516
3517 /* Ensure our queue has valid packets */
3518 ipw_rx_queue_replenish(priv);
3519
3520 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3521
3522 /* ack pending interrupts */
3523 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3524
3525 #ifndef CONFIG_PM
3526 release_firmware(raw);
3527 #endif
3528 return 0;
3529
3530 error:
3531 if (priv->rxq) {
3532 ipw_rx_queue_free(priv, priv->rxq);
3533 priv->rxq = NULL;
3534 }
3535 ipw_tx_queue_free(priv);
3536 if (raw)
3537 release_firmware(raw);
3538 #ifdef CONFIG_PM
3539 fw_loaded = 0;
3540 raw = NULL;
3541 #endif
3542
3543 return rc;
3544 }
3545
3546 /**
3547 * DMA services
3548 *
3549 * Theory of operation
3550 *
3551  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3552  * Two empty entries are always kept in the buffer to protect from overflow.
3553  *
3554  * For the Tx queues there are low mark and high mark limits. If, after
3555  * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3556  * is stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
3557  * space becomes > high mark, the Tx queue is resumed.
3558 *
3559 * The IPW operates with six queues, one receive queue in the device's
3560 * sram, one transmit queue for sending commands to the device firmware,
3561 * and four transmit queues for data.
3562 *
3563 * The four transmit queues allow for performing quality of service (qos)
3564 * transmissions as per the 802.11 protocol. Currently Linux does not
3565 * provide a mechanism to the user for utilizing prioritized queues, so
3566 * we only utilize the first data transmit queue (queue1).
3567 */
3568
3569 /**
3570 * Driver allocates buffers of this size for Rx
3571 */
3572
3573 static inline int ipw_queue_space(const struct clx2_queue *q)
3574 {
3575 int s = q->last_used - q->first_empty;
3576 if (s <= 0)
3577 s += q->n_bd;
3578 s -= 2; /* keep some reserve to not confuse empty and full situations */
3579 if (s < 0)
3580 s = 0;
3581 return s;
3582 }
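/*
 * Worked example (illustrative): with n_bd == 64, last_used == 10 and
 * first_empty == 60 the raw difference is -50; after wrapping (+64) and
 * subtracting the 2-entry reserve this reports 12 free slots. With
 * first_empty == last_used (an empty queue) it reports 62, never 64, so
 * "full" and "empty" can always be told apart.
 */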
3583
3584 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3585 {
3586 return (++index == n_bd) ? 0 : index;
3587 }
3588
3589 /**
3590 * Initialize common DMA queue structure
3591 *
3592 * @param q queue to init
3593 * @param count Number of BD's to allocate. Should be power of 2
3594 * @param read_register Address for 'read' register
3595 * (not offset within BAR, full address)
3596 * @param write_register Address for 'write' register
3597 * (not offset within BAR, full address)
3598 * @param base_register Address for 'base' register
3599 * (not offset within BAR, full address)
3600 * @param size Address for 'size' register
3601 * (not offset within BAR, full address)
3602 */
3603 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3604 int count, u32 read, u32 write, u32 base, u32 size)
3605 {
3606 q->n_bd = count;
3607
3608 q->low_mark = q->n_bd / 4;
3609 if (q->low_mark < 4)
3610 q->low_mark = 4;
3611
3612 q->high_mark = q->n_bd / 8;
3613 if (q->high_mark < 2)
3614 q->high_mark = 2;
3615
3616 q->first_empty = q->last_used = 0;
3617 q->reg_r = read;
3618 q->reg_w = write;
3619
3620 ipw_write32(priv, base, q->dma_addr);
3621 ipw_write32(priv, size, count);
3622 ipw_write32(priv, read, 0);
3623 ipw_write32(priv, write, 0);
3624
3625 _ipw_read32(priv, 0x90);
3626 }
3627
3628 static int ipw_queue_tx_init(struct ipw_priv *priv,
3629 struct clx2_tx_queue *q,
3630 int count, u32 read, u32 write, u32 base, u32 size)
3631 {
3632 struct pci_dev *dev = priv->pci_dev;
3633
3634 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3635 if (!q->txb) {
3636 		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3637 return -ENOMEM;
3638 }
3639
3640 q->bd =
3641 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3642 if (!q->bd) {
3643 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3644 sizeof(q->bd[0]) * count);
3645 kfree(q->txb);
3646 q->txb = NULL;
3647 return -ENOMEM;
3648 }
3649
3650 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3651 return 0;
3652 }
3653
3654 /**
3655  * Free one TFD, the one at index [txq->q.last_used].
3656 * Do NOT advance any indexes
3657 *
3658 * @param dev
3659 * @param txq
3660 */
3661 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3662 struct clx2_tx_queue *txq)
3663 {
3664 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3665 struct pci_dev *dev = priv->pci_dev;
3666 int i;
3667
3668 /* classify bd */
3669 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3670 /* nothing to cleanup after for host commands */
3671 return;
3672
3673 /* sanity check */
3674 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3675 IPW_ERROR("Too many chunks: %i\n",
3676 le32_to_cpu(bd->u.data.num_chunks));
3677 /** @todo issue fatal error, it is quite serious situation */
3678 return;
3679 }
3680
3681 /* unmap chunks if any */
3682 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3683 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3684 le16_to_cpu(bd->u.data.chunk_len[i]),
3685 PCI_DMA_TODEVICE);
3686 if (txq->txb[txq->q.last_used]) {
3687 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3688 txq->txb[txq->q.last_used] = NULL;
3689 }
3690 }
3691 }
3692
3693 /**
3694 * Deallocate DMA queue.
3695 *
3696 * Empty queue by removing and destroying all BD's.
3697 * Free all buffers.
3698 *
3699 * @param dev
3700 * @param q
3701 */
3702 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3703 {
3704 struct clx2_queue *q = &txq->q;
3705 struct pci_dev *dev = priv->pci_dev;
3706
3707 if (q->n_bd == 0)
3708 return;
3709
3710 /* first, empty all BD's */
3711 for (; q->first_empty != q->last_used;
3712 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3713 ipw_queue_tx_free_tfd(priv, txq);
3714 }
3715
3716 /* free buffers belonging to queue itself */
3717 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3718 q->dma_addr);
3719 kfree(txq->txb);
3720
3721 /* 0 fill whole structure */
3722 memset(txq, 0, sizeof(*txq));
3723 }
3724
3725 /**
3726 * Destroy all DMA queues and structures
3727 *
3728 * @param priv
3729 */
3730 static void ipw_tx_queue_free(struct ipw_priv *priv)
3731 {
3732 /* Tx CMD queue */
3733 ipw_queue_tx_free(priv, &priv->txq_cmd);
3734
3735 /* Tx queues */
3736 ipw_queue_tx_free(priv, &priv->txq[0]);
3737 ipw_queue_tx_free(priv, &priv->txq[1]);
3738 ipw_queue_tx_free(priv, &priv->txq[2]);
3739 ipw_queue_tx_free(priv, &priv->txq[3]);
3740 }
3741
3742 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3743 {
3744 /* First 3 bytes are manufacturer */
3745 bssid[0] = priv->mac_addr[0];
3746 bssid[1] = priv->mac_addr[1];
3747 bssid[2] = priv->mac_addr[2];
3748
3749 /* Last bytes are random */
3750 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3751
3752 bssid[0] &= 0xfe; /* clear multicast bit */
3753 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3754 }
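/* Example (illustrative, hypothetical MAC): an adapter address starting
 * 00:0e:35 yields an IBSS BSSID of the form 02:0e:35:xx:xx:xx -- the random
 * tail comes from get_random_bytes(), the multicast bit is cleared and the
 * locally administered bit is set. */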
3755
3756 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3757 {
3758 struct ipw_station_entry entry;
3759 int i;
3760
3761 for (i = 0; i < priv->num_stations; i++) {
3762 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3763 /* Another node is active in network */
3764 priv->missed_adhoc_beacons = 0;
3765 if (!(priv->config & CFG_STATIC_CHANNEL))
3766 /* when other nodes drop out, we drop out */
3767 priv->config &= ~CFG_ADHOC_PERSIST;
3768
3769 return i;
3770 }
3771 }
3772
3773 if (i == MAX_STATIONS)
3774 return IPW_INVALID_STATION;
3775
3776 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3777
3778 entry.reserved = 0;
3779 entry.support_mode = 0;
3780 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3781 memcpy(priv->stations[i], bssid, ETH_ALEN);
3782 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3783 &entry, sizeof(entry));
3784 priv->num_stations++;
3785
3786 return i;
3787 }
3788
3789 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3790 {
3791 int i;
3792
3793 for (i = 0; i < priv->num_stations; i++)
3794 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3795 return i;
3796
3797 return IPW_INVALID_STATION;
3798 }
3799
3800 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3801 {
3802 int err;
3803
3804 if (priv->status & STATUS_ASSOCIATING) {
3805 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3806 queue_work(priv->workqueue, &priv->disassociate);
3807 return;
3808 }
3809
3810 if (!(priv->status & STATUS_ASSOCIATED)) {
3811 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3812 return;
3813 }
3814
3815 	IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3816 "on channel %d.\n",
3817 MAC_ARG(priv->assoc_request.bssid),
3818 priv->assoc_request.channel);
3819
3820 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3821 priv->status |= STATUS_DISASSOCIATING;
3822
3823 if (quiet)
3824 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3825 else
3826 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3827
3828 err = ipw_send_associate(priv, &priv->assoc_request);
3829 if (err) {
3830 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3831 "failed.\n");
3832 return;
3833 }
3834
3835 }
3836
3837 static int ipw_disassociate(void *data)
3838 {
3839 struct ipw_priv *priv = data;
3840 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3841 return 0;
3842 ipw_send_disassociate(data, 0);
3843 return 1;
3844 }
3845
3846 static void ipw_bg_disassociate(void *data)
3847 {
3848 struct ipw_priv *priv = data;
3849 mutex_lock(&priv->mutex);
3850 ipw_disassociate(data);
3851 mutex_unlock(&priv->mutex);
3852 }
3853
3854 static void ipw_system_config(void *data)
3855 {
3856 struct ipw_priv *priv = data;
3857
3858 #ifdef CONFIG_IPW2200_PROMISCUOUS
3859 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3860 priv->sys_config.accept_all_data_frames = 1;
3861 priv->sys_config.accept_non_directed_frames = 1;
3862 priv->sys_config.accept_all_mgmt_bcpr = 1;
3863 priv->sys_config.accept_all_mgmt_frames = 1;
3864 }
3865 #endif
3866
3867 ipw_send_system_config(priv);
3868 }
3869
3870 struct ipw_status_code {
3871 u16 status;
3872 const char *reason;
3873 };
3874
3875 static const struct ipw_status_code ipw_status_codes[] = {
3876 {0x00, "Successful"},
3877 {0x01, "Unspecified failure"},
3878 {0x0A, "Cannot support all requested capabilities in the "
3879 "Capability information field"},
3880 {0x0B, "Reassociation denied due to inability to confirm that "
3881 "association exists"},
3882 {0x0C, "Association denied due to reason outside the scope of this "
3883 "standard"},
3884 {0x0D,
3885 "Responding station does not support the specified authentication "
3886 "algorithm"},
3887 {0x0E,
3888 "Received an Authentication frame with authentication sequence "
3889 "transaction sequence number out of expected sequence"},
3890 {0x0F, "Authentication rejected because of challenge failure"},
3891 {0x10, "Authentication rejected due to timeout waiting for next "
3892 "frame in sequence"},
3893 {0x11, "Association denied because AP is unable to handle additional "
3894 "associated stations"},
3895 {0x12,
3896 "Association denied due to requesting station not supporting all "
3897 "of the datarates in the BSSBasicServiceSet Parameter"},
3898 {0x13,
3899 "Association denied due to requesting station not supporting "
3900 "short preamble operation"},
3901 {0x14,
3902 "Association denied due to requesting station not supporting "
3903 "PBCC encoding"},
3904 {0x15,
3905 "Association denied due to requesting station not supporting "
3906 "channel agility"},
3907 {0x19,
3908 "Association denied due to requesting station not supporting "
3909 "short slot operation"},
3910 {0x1A,
3911 "Association denied due to requesting station not supporting "
3912 "DSSS-OFDM operation"},
3913 {0x28, "Invalid Information Element"},
3914 {0x29, "Group Cipher is not valid"},
3915 {0x2A, "Pairwise Cipher is not valid"},
3916 {0x2B, "AKMP is not valid"},
3917 {0x2C, "Unsupported RSN IE version"},
3918 {0x2D, "Invalid RSN IE Capabilities"},
3919 {0x2E, "Cipher suite is rejected per security policy"},
3920 };
3921
3922 #ifdef CONFIG_IPW2200_DEBUG
3923 static const char *ipw_get_status_code(u16 status)
3924 {
3925 int i;
3926 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3927 if (ipw_status_codes[i].status == (status & 0xff))
3928 return ipw_status_codes[i].reason;
3929 return "Unknown status value.";
3930 }
3931 #endif
3932
3933 static inline void average_init(struct average *avg)
3934 {
3935 memset(avg, 0, sizeof(*avg));
3936 }
3937
3938 #define DEPTH_RSSI 8
3939 #define DEPTH_NOISE 16
3940 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3941 {
3942 	return ((depth - 1) * prev_avg + val) / depth;
3943 }
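/*
 * Worked example (illustrative): with DEPTH_RSSI == 8, a previous average
 * of -60 and a new sample of -52, the filter returns
 * ((8 - 1) * -60 + -52) / 8 == -472 / 8 == -59, i.e. each update moves
 * roughly 1/depth of the way toward the new sample.
 */
#if 0
	s16 avg = -60;

	avg = exponential_average(avg, -52, DEPTH_RSSI);	/* -59 */
#endif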
3944
3945 static void average_add(struct average *avg, s16 val)
3946 {
3947 avg->sum -= avg->entries[avg->pos];
3948 avg->sum += val;
3949 avg->entries[avg->pos++] = val;
3950 if (unlikely(avg->pos == AVG_ENTRIES)) {
3951 avg->init = 1;
3952 avg->pos = 0;
3953 }
3954 }
3955
3956 static s16 average_value(struct average *avg)
3957 {
3958 if (!unlikely(avg->init)) {
3959 if (avg->pos)
3960 return avg->sum / avg->pos;
3961 return 0;
3962 }
3963
3964 return avg->sum / AVG_ENTRIES;
3965 }
3966
3967 static void ipw_reset_stats(struct ipw_priv *priv)
3968 {
3969 u32 len = sizeof(u32);
3970
3971 priv->quality = 0;
3972
3973 average_init(&priv->average_missed_beacons);
3974 priv->exp_avg_rssi = -60;
3975 priv->exp_avg_noise = -85 + 0x100;
3976
3977 priv->last_rate = 0;
3978 priv->last_missed_beacons = 0;
3979 priv->last_rx_packets = 0;
3980 priv->last_tx_packets = 0;
3981 priv->last_tx_failures = 0;
3982
3983 /* Firmware managed, reset only when NIC is restarted, so we have to
3984 * normalize on the current value */
3985 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3986 &priv->last_rx_err, &len);
3987 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3988 &priv->last_tx_failures, &len);
3989
3990 /* Driver managed, reset with each association */
3991 priv->missed_adhoc_beacons = 0;
3992 priv->missed_beacons = 0;
3993 priv->tx_packets = 0;
3994 priv->rx_packets = 0;
3995
3996 }
3997
3998 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3999 {
4000 u32 i = 0x80000000;
4001 u32 mask = priv->rates_mask;
4002 /* If currently associated in B mode, restrict the maximum
4003 * rate match to B rates */
4004 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4005 mask &= IEEE80211_CCK_RATES_MASK;
4006
4007 /* TODO: Verify that the rate is supported by the current rates
4008 * list. */
4009
4010 while (i && !(mask & i))
4011 i >>= 1;
4012 switch (i) {
4013 case IEEE80211_CCK_RATE_1MB_MASK:
4014 return 1000000;
4015 case IEEE80211_CCK_RATE_2MB_MASK:
4016 return 2000000;
4017 case IEEE80211_CCK_RATE_5MB_MASK:
4018 return 5500000;
4019 case IEEE80211_OFDM_RATE_6MB_MASK:
4020 return 6000000;
4021 case IEEE80211_OFDM_RATE_9MB_MASK:
4022 return 9000000;
4023 case IEEE80211_CCK_RATE_11MB_MASK:
4024 return 11000000;
4025 case IEEE80211_OFDM_RATE_12MB_MASK:
4026 return 12000000;
4027 case IEEE80211_OFDM_RATE_18MB_MASK:
4028 return 18000000;
4029 case IEEE80211_OFDM_RATE_24MB_MASK:
4030 return 24000000;
4031 case IEEE80211_OFDM_RATE_36MB_MASK:
4032 return 36000000;
4033 case IEEE80211_OFDM_RATE_48MB_MASK:
4034 return 48000000;
4035 case IEEE80211_OFDM_RATE_54MB_MASK:
4036 return 54000000;
4037 }
4038
4039 if (priv->ieee->mode == IEEE_B)
4040 return 11000000;
4041 else
4042 return 54000000;
4043 }
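/*
 * The loop above is a plain highest-set-bit scan over rates_mask; the
 * switch then maps that single bit to a rate in bits per second.  An
 * equivalent stand-alone sketch (illustrative only, not used by the
 * driver):
 */
#if 0
static u32 highest_set_bit(u32 mask)
{
	u32 i = 0x80000000;

	while (i && !(mask & i))
		i >>= 1;
	return i;		/* 0 when mask is empty */
}
#endif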
4044
4045 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4046 {
4047 u32 rate, len = sizeof(rate);
4048 int err;
4049
4050 if (!(priv->status & STATUS_ASSOCIATED))
4051 return 0;
4052
4053 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4054 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4055 &len);
4056 if (err) {
4057 IPW_DEBUG_INFO("failed querying ordinals.\n");
4058 return 0;
4059 }
4060 } else
4061 return ipw_get_max_rate(priv);
4062
4063 switch (rate) {
4064 case IPW_TX_RATE_1MB:
4065 return 1000000;
4066 case IPW_TX_RATE_2MB:
4067 return 2000000;
4068 case IPW_TX_RATE_5MB:
4069 return 5500000;
4070 case IPW_TX_RATE_6MB:
4071 return 6000000;
4072 case IPW_TX_RATE_9MB:
4073 return 9000000;
4074 case IPW_TX_RATE_11MB:
4075 return 11000000;
4076 case IPW_TX_RATE_12MB:
4077 return 12000000;
4078 case IPW_TX_RATE_18MB:
4079 return 18000000;
4080 case IPW_TX_RATE_24MB:
4081 return 24000000;
4082 case IPW_TX_RATE_36MB:
4083 return 36000000;
4084 case IPW_TX_RATE_48MB:
4085 return 48000000;
4086 case IPW_TX_RATE_54MB:
4087 return 54000000;
4088 }
4089
4090 return 0;
4091 }
4092
4093 #define IPW_STATS_INTERVAL (2 * HZ)
4094 static void ipw_gather_stats(struct ipw_priv *priv)
4095 {
4096 u32 rx_err, rx_err_delta, rx_packets_delta;
4097 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4098 u32 missed_beacons_percent, missed_beacons_delta;
4099 u32 quality = 0;
4100 u32 len = sizeof(u32);
4101 s16 rssi;
4102 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4103 rate_quality;
4104 u32 max_rate;
4105
4106 if (!(priv->status & STATUS_ASSOCIATED)) {
4107 priv->quality = 0;
4108 return;
4109 }
4110
4111 /* Update the statistics */
4112 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4113 &priv->missed_beacons, &len);
4114 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4115 priv->last_missed_beacons = priv->missed_beacons;
4116 if (priv->assoc_request.beacon_interval) {
4117 missed_beacons_percent = missed_beacons_delta *
4118 (HZ * priv->assoc_request.beacon_interval) /
4119 (IPW_STATS_INTERVAL * 10);
4120 } else {
4121 missed_beacons_percent = 0;
4122 }
4123 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4124
4125 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4126 rx_err_delta = rx_err - priv->last_rx_err;
4127 priv->last_rx_err = rx_err;
4128
4129 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4130 tx_failures_delta = tx_failures - priv->last_tx_failures;
4131 priv->last_tx_failures = tx_failures;
4132
4133 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4134 priv->last_rx_packets = priv->rx_packets;
4135
4136 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4137 priv->last_tx_packets = priv->tx_packets;
4138
4139 /* Calculate quality based on the following:
4140 *
4141 * Missed beacon: 100% = 0, 0% = 70% missed
4142 * Rate: 60% = 1Mbs, 100% = Max
4143 * Rx and Tx errors represent a straight % of total Rx/Tx
4144 * RSSI: 100% = > -50, 0% = < -80
4145 * Rx errors: 100% = 0, 0% = 50% missed
4146 *
4147 * The lowest computed quality is used.
4148 *
4149 */
4150 #define BEACON_THRESHOLD 5
4151 beacon_quality = 100 - missed_beacons_percent;
4152 if (beacon_quality < BEACON_THRESHOLD)
4153 beacon_quality = 0;
4154 else
4155 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4156 (100 - BEACON_THRESHOLD);
4157 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4158 beacon_quality, missed_beacons_percent);
4159
4160 priv->last_rate = ipw_get_current_rate(priv);
4161 max_rate = ipw_get_max_rate(priv);
4162 rate_quality = priv->last_rate * 40 / max_rate + 60;
4163 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4164 rate_quality, priv->last_rate / 1000000);
4165
4166 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4167 rx_quality = 100 - (rx_err_delta * 100) /
4168 (rx_packets_delta + rx_err_delta);
4169 else
4170 rx_quality = 100;
4171 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4172 rx_quality, rx_err_delta, rx_packets_delta);
4173
4174 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4175 tx_quality = 100 - (tx_failures_delta * 100) /
4176 (tx_packets_delta + tx_failures_delta);
4177 else
4178 tx_quality = 100;
4179 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4180 tx_quality, tx_failures_delta, tx_packets_delta);
4181
4182 rssi = priv->exp_avg_rssi;
4183 signal_quality =
4184 (100 *
4185 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4186 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4187 (priv->ieee->perfect_rssi - rssi) *
4188 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4189 62 * (priv->ieee->perfect_rssi - rssi))) /
4190 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4191 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4192 if (signal_quality > 100)
4193 signal_quality = 100;
4194 else if (signal_quality < 1)
4195 signal_quality = 0;
4196
4197 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4198 signal_quality, rssi);
4199
4200 quality = min(beacon_quality,
4201 min(rate_quality,
4202 min(tx_quality, min(rx_quality, signal_quality))));
4203 if (quality == beacon_quality)
4204 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4205 quality);
4206 if (quality == rate_quality)
4207 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4208 quality);
4209 if (quality == tx_quality)
4210 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4211 quality);
4212 if (quality == rx_quality)
4213 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4214 quality);
4215 if (quality == signal_quality)
4216 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4217 quality);
4218
4219 priv->quality = quality;
4220
4221 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4222 IPW_STATS_INTERVAL);
4223 }
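/*
 * The signal_quality expression above is the quadratic mapping
 *
 *   q = (100 * (p - w)^2 - (p - r) * (15 * (p - w) + 62 * (p - r)))
 *       / (p - w)^2
 *
 * with p = perfect_rssi, w = worst_rssi and r the averaged RSSI, clamped
 * to [0, 100].  Assuming, for example, p == -20 and w == -85 (the real
 * values come from the ieee80211 layer), an RSSI of -60 works out to
 * (100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
 *   == (422500 - 138200) / 4225 == 67%.
 */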
4224
4225 static void ipw_bg_gather_stats(void *data)
4226 {
4227 struct ipw_priv *priv = data;
4228 mutex_lock(&priv->mutex);
4229 ipw_gather_stats(data);
4230 mutex_unlock(&priv->mutex);
4231 }
4232
4233 /* Missed beacon behavior:
4234 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4235 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4236 * Above disassociate threshold, give up and stop scanning.
4237 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4238 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4239 int missed_count)
4240 {
4241 priv->notif_missed_beacons = missed_count;
4242
4243 if (missed_count > priv->disassociate_threshold &&
4244 priv->status & STATUS_ASSOCIATED) {
4245 /* If associated and we've hit the missed
4246 * beacon threshold, disassociate, turn
4247 * off roaming, and abort any active scans */
4248 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4249 IPW_DL_STATE | IPW_DL_ASSOC,
4250 "Missed beacon: %d - disassociate\n", missed_count);
4251 priv->status &= ~STATUS_ROAMING;
4252 if (priv->status & STATUS_SCANNING) {
4253 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4254 IPW_DL_STATE,
4255 "Aborting scan with missed beacon.\n");
4256 queue_work(priv->workqueue, &priv->abort_scan);
4257 }
4258
4259 queue_work(priv->workqueue, &priv->disassociate);
4260 return;
4261 }
4262
4263 if (priv->status & STATUS_ROAMING) {
4264 /* If we are currently roaming, then just
4265 * print a debug statement... */
4266 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4267 "Missed beacon: %d - roam in progress\n",
4268 missed_count);
4269 return;
4270 }
4271
4272 if (roaming &&
4273 (missed_count > priv->roaming_threshold &&
4274 missed_count <= priv->disassociate_threshold)) {
4275 /* If we are not already roaming, set the ROAM
4276 * bit in the status and kick off a scan.
4277 * This can happen several times before we reach
4278 * disassociate_threshold. */
4279 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4280 "Missed beacon: %d - initiate "
4281 "roaming\n", missed_count);
4282 if (!(priv->status & STATUS_ROAMING)) {
4283 priv->status |= STATUS_ROAMING;
4284 if (!(priv->status & STATUS_SCANNING))
4285 queue_work(priv->workqueue,
4286 &priv->request_scan);
4287 }
4288 return;
4289 }
4290
4291 if (priv->status & STATUS_SCANNING) {
4292 /* Stop scan to keep fw from getting
4293 * stuck (only if we aren't roaming --
4294 * otherwise we'll never scan more than 2 or 3
4295 * channels..) */
4296 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4297 "Aborting scan with missed beacon.\n");
4298 queue_work(priv->workqueue, &priv->abort_scan);
4299 }
4300
4301 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4302 }
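/*
 * Rough decision table for the thresholds above, using hypothetical
 * values roaming_threshold == 8 and disassociate_threshold == 24 (the
 * real values are configured elsewhere in the driver):
 *
 *   missed <= 8        -> just log the count
 *   8 < missed <= 24   -> set STATUS_ROAMING and request a scan
 *   missed > 24        -> abort any scan and queue the disassociate work
 *
 * As noted above, the roaming branch never triggers when
 * disassociate_threshold <= roaming_threshold.
 */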
4303
4304 /**
4305 * Handle host notification packet.
4306 * Called from interrupt routine
4307 */
4308 static void ipw_rx_notification(struct ipw_priv *priv,
4309 struct ipw_rx_notification *notif)
4310 {
4311 notif->size = le16_to_cpu(notif->size);
4312
4313 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4314
4315 switch (notif->subtype) {
4316 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4317 struct notif_association *assoc = &notif->u.assoc;
4318
4319 switch (assoc->state) {
4320 case CMAS_ASSOCIATED:{
4321 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4322 IPW_DL_ASSOC,
4323 "associated: '%s' " MAC_FMT
4324 " \n",
4325 escape_essid(priv->essid,
4326 priv->essid_len),
4327 MAC_ARG(priv->bssid));
4328
4329 switch (priv->ieee->iw_mode) {
4330 case IW_MODE_INFRA:
4331 memcpy(priv->ieee->bssid,
4332 priv->bssid, ETH_ALEN);
4333 break;
4334
4335 case IW_MODE_ADHOC:
4336 memcpy(priv->ieee->bssid,
4337 priv->bssid, ETH_ALEN);
4338
4339 /* clear out the station table */
4340 priv->num_stations = 0;
4341
4342 IPW_DEBUG_ASSOC
4343 ("queueing adhoc check\n");
4344 queue_delayed_work(priv->
4345 workqueue,
4346 &priv->
4347 adhoc_check,
4348 priv->
4349 assoc_request.
4350 beacon_interval);
4351 break;
4352 }
4353
4354 priv->status &= ~STATUS_ASSOCIATING;
4355 priv->status |= STATUS_ASSOCIATED;
4356 queue_work(priv->workqueue,
4357 &priv->system_config);
4358
4359 #ifdef CONFIG_IPW2200_QOS
4360 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4361 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4362 if ((priv->status & STATUS_AUTH) &&
4363 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4364 == IEEE80211_STYPE_ASSOC_RESP)) {
4365 if ((sizeof
4366 (struct
4367 ieee80211_assoc_response)
4368 <= notif->size)
4369 && (notif->size <= 2314)) {
4370 struct
4371 ieee80211_rx_stats
4372 stats = {
4373 .len =
4374 notif->
4375 size - 1,
4376 };
4377
4378 IPW_DEBUG_QOS
4379 ("QoS Associate "
4380 "size %d\n",
4381 notif->size);
4382 ieee80211_rx_mgt(priv->
4383 ieee,
4384 (struct
4385 ieee80211_hdr_4addr
4386 *)
4387 &notif->u.raw, &stats);
4388 }
4389 }
4390 #endif
4391
4392 schedule_work(&priv->link_up);
4393
4394 break;
4395 }
4396
4397 case CMAS_AUTHENTICATED:{
4398 if (priv->
4399 status & (STATUS_ASSOCIATED |
4400 STATUS_AUTH)) {
4401 #ifdef CONFIG_IPW2200_DEBUG
4402 struct notif_authenticate *auth
4403 = &notif->u.auth;
4404 IPW_DEBUG(IPW_DL_NOTIF |
4405 IPW_DL_STATE |
4406 IPW_DL_ASSOC,
4407 "deauthenticated: '%s' "
4408 MAC_FMT
4409 ": (0x%04X) - %s \n",
4410 escape_essid(priv->
4411 essid,
4412 priv->
4413 essid_len),
4414 MAC_ARG(priv->bssid),
4415 ntohs(auth->status),
4416 ipw_get_status_code
4417 (ntohs
4418 (auth->status)));
4419 #endif
4420
4421 priv->status &=
4422 ~(STATUS_ASSOCIATING |
4423 STATUS_AUTH |
4424 STATUS_ASSOCIATED);
4425
4426 schedule_work(&priv->link_down);
4427 break;
4428 }
4429
4430 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4431 IPW_DL_ASSOC,
4432 "authenticated: '%s' " MAC_FMT
4433 "\n",
4434 escape_essid(priv->essid,
4435 priv->essid_len),
4436 MAC_ARG(priv->bssid));
4437 break;
4438 }
4439
4440 case CMAS_INIT:{
4441 if (priv->status & STATUS_AUTH) {
4442 struct
4443 ieee80211_assoc_response
4444 *resp;
4445 resp =
4446 (struct
4447 ieee80211_assoc_response
4448 *)&notif->u.raw;
4449 IPW_DEBUG(IPW_DL_NOTIF |
4450 IPW_DL_STATE |
4451 IPW_DL_ASSOC,
4452 "association failed (0x%04X): %s\n",
4453 ntohs(resp->status),
4454 ipw_get_status_code
4455 (ntohs
4456 (resp->status)));
4457 }
4458
4459 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4460 IPW_DL_ASSOC,
4461 "disassociated: '%s' " MAC_FMT
4462 " \n",
4463 escape_essid(priv->essid,
4464 priv->essid_len),
4465 MAC_ARG(priv->bssid));
4466
4467 priv->status &=
4468 ~(STATUS_DISASSOCIATING |
4469 STATUS_ASSOCIATING |
4470 STATUS_ASSOCIATED | STATUS_AUTH);
4471 if (priv->assoc_network
4472 && (priv->assoc_network->
4473 capability &
4474 WLAN_CAPABILITY_IBSS))
4475 ipw_remove_current_network
4476 (priv);
4477
4478 schedule_work(&priv->link_down);
4479
4480 break;
4481 }
4482
4483 case CMAS_RX_ASSOC_RESP:
4484 break;
4485
4486 default:
4487 IPW_ERROR("assoc: unknown (%d)\n",
4488 assoc->state);
4489 break;
4490 }
4491
4492 break;
4493 }
4494
4495 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4496 struct notif_authenticate *auth = &notif->u.auth;
4497 switch (auth->state) {
4498 case CMAS_AUTHENTICATED:
4499 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4500 "authenticated: '%s' " MAC_FMT " \n",
4501 escape_essid(priv->essid,
4502 priv->essid_len),
4503 MAC_ARG(priv->bssid));
4504 priv->status |= STATUS_AUTH;
4505 break;
4506
4507 case CMAS_INIT:
4508 if (priv->status & STATUS_AUTH) {
4509 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4510 IPW_DL_ASSOC,
4511 "authentication failed (0x%04X): %s\n",
4512 ntohs(auth->status),
4513 ipw_get_status_code(ntohs
4514 (auth->
4515 status)));
4516 }
4517 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4518 IPW_DL_ASSOC,
4519 "deauthenticated: '%s' " MAC_FMT "\n",
4520 escape_essid(priv->essid,
4521 priv->essid_len),
4522 MAC_ARG(priv->bssid));
4523
4524 priv->status &= ~(STATUS_ASSOCIATING |
4525 STATUS_AUTH |
4526 STATUS_ASSOCIATED);
4527
4528 schedule_work(&priv->link_down);
4529 break;
4530
4531 case CMAS_TX_AUTH_SEQ_1:
4532 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4533 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4534 break;
4535 case CMAS_RX_AUTH_SEQ_2:
4536 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4537 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4538 break;
4539 case CMAS_AUTH_SEQ_1_PASS:
4540 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4541 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4542 break;
4543 case CMAS_AUTH_SEQ_1_FAIL:
4544 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4546 break;
4547 case CMAS_TX_AUTH_SEQ_3:
4548 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4549 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4550 break;
4551 case CMAS_RX_AUTH_SEQ_4:
4552 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4553 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4554 break;
4555 case CMAS_AUTH_SEQ_2_PASS:
4556 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4557 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4558 break;
4559 case CMAS_AUTH_SEQ_2_FAIL:
4560 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4561 				  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4562 break;
4563 case CMAS_TX_ASSOC:
4564 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4565 IPW_DL_ASSOC, "TX_ASSOC\n");
4566 break;
4567 case CMAS_RX_ASSOC_RESP:
4568 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4569 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4570
4571 break;
4572 case CMAS_ASSOCIATED:
4573 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4574 IPW_DL_ASSOC, "ASSOCIATED\n");
4575 break;
4576 default:
4577 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4578 auth->state);
4579 break;
4580 }
4581 break;
4582 }
4583
4584 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4585 struct notif_channel_result *x =
4586 &notif->u.channel_result;
4587
4588 if (notif->size == sizeof(*x)) {
4589 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4590 x->channel_num);
4591 } else {
4592 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4593 "(should be %zd)\n",
4594 notif->size, sizeof(*x));
4595 }
4596 break;
4597 }
4598
4599 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4600 struct notif_scan_complete *x = &notif->u.scan_complete;
4601 if (notif->size == sizeof(*x)) {
4602 IPW_DEBUG_SCAN
4603 ("Scan completed: type %d, %d channels, "
4604 "%d status\n", x->scan_type,
4605 x->num_channels, x->status);
4606 } else {
4607 IPW_ERROR("Scan completed of wrong size %d "
4608 "(should be %zd)\n",
4609 notif->size, sizeof(*x));
4610 }
4611
4612 priv->status &=
4613 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4614
4615 wake_up_interruptible(&priv->wait_state);
4616 cancel_delayed_work(&priv->scan_check);
4617
4618 if (priv->status & STATUS_EXIT_PENDING)
4619 break;
4620
4621 priv->ieee->scans++;
4622
4623 #ifdef CONFIG_IPW2200_MONITOR
4624 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4625 priv->status |= STATUS_SCAN_FORCED;
4626 queue_work(priv->workqueue,
4627 &priv->request_scan);
4628 break;
4629 }
4630 priv->status &= ~STATUS_SCAN_FORCED;
4631 #endif /* CONFIG_IPW2200_MONITOR */
4632
4633 if (!(priv->status & (STATUS_ASSOCIATED |
4634 STATUS_ASSOCIATING |
4635 STATUS_ROAMING |
4636 STATUS_DISASSOCIATING)))
4637 queue_work(priv->workqueue, &priv->associate);
4638 else if (priv->status & STATUS_ROAMING) {
4639 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4640 /* If a scan completed and we are in roam mode, then
4641 * the scan that completed was the one requested as a
4642 * result of entering roam... so, schedule the
4643 * roam work */
4644 queue_work(priv->workqueue,
4645 &priv->roam);
4646 else
4647 /* Don't schedule if we aborted the scan */
4648 priv->status &= ~STATUS_ROAMING;
4649 } else if (priv->status & STATUS_SCAN_PENDING)
4650 queue_work(priv->workqueue,
4651 &priv->request_scan);
4652 else if (priv->config & CFG_BACKGROUND_SCAN
4653 && priv->status & STATUS_ASSOCIATED)
4654 queue_delayed_work(priv->workqueue,
4655 &priv->request_scan, HZ);
4656
4657 /* Send an empty event to user space.
4658 * We don't send the received data on the event because
4659 * it would require us to do complex transcoding, and
4660 			 * we want to minimise the work done in the irq handler.
4661 			 * Use a request to extract the data.
4662 			 * Also, we generate this event for any scan, regardless
4663 			 * of how the scan was initiated. User space can just
4664 			 * sync on periodic scans to get fresh data...
4665 * Jean II */
4666 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4667 union iwreq_data wrqu;
4668
4669 wrqu.data.length = 0;
4670 wrqu.data.flags = 0;
4671 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4672 &wrqu, NULL);
4673 }
4674 break;
4675 }
4676
4677 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4678 struct notif_frag_length *x = &notif->u.frag_len;
4679
4680 if (notif->size == sizeof(*x))
4681 IPW_ERROR("Frag length: %d\n",
4682 le16_to_cpu(x->frag_length));
4683 else
4684 IPW_ERROR("Frag length of wrong size %d "
4685 "(should be %zd)\n",
4686 notif->size, sizeof(*x));
4687 break;
4688 }
4689
4690 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4691 struct notif_link_deterioration *x =
4692 &notif->u.link_deterioration;
4693
4694 if (notif->size == sizeof(*x)) {
4695 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4696 "link deterioration: type %d, cnt %d\n",
4697 x->silence_notification_type,
4698 x->silence_count);
4699 memcpy(&priv->last_link_deterioration, x,
4700 sizeof(*x));
4701 } else {
4702 IPW_ERROR("Link Deterioration of wrong size %d "
4703 "(should be %zd)\n",
4704 notif->size, sizeof(*x));
4705 }
4706 break;
4707 }
4708
4709 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4710 IPW_ERROR("Dino config\n");
4711 if (priv->hcmd
4712 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4713 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4714
4715 break;
4716 }
4717
4718 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4719 struct notif_beacon_state *x = &notif->u.beacon_state;
4720 if (notif->size != sizeof(*x)) {
4721 IPW_ERROR
4722 ("Beacon state of wrong size %d (should "
4723 "be %zd)\n", notif->size, sizeof(*x));
4724 break;
4725 }
4726
4727 if (le32_to_cpu(x->state) ==
4728 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4729 ipw_handle_missed_beacon(priv,
4730 le32_to_cpu(x->
4731 number));
4732
4733 break;
4734 }
4735
4736 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4737 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4738 if (notif->size == sizeof(*x)) {
4739 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4740 "0x%02x station %d\n",
4741 x->key_state, x->security_type,
4742 x->station_index);
4743 break;
4744 }
4745
4746 IPW_ERROR
4747 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4748 notif->size, sizeof(*x));
4749 break;
4750 }
4751
4752 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4753 struct notif_calibration *x = &notif->u.calibration;
4754
4755 if (notif->size == sizeof(*x)) {
4756 memcpy(&priv->calib, x, sizeof(*x));
4757 IPW_DEBUG_INFO("TODO: Calibration\n");
4758 break;
4759 }
4760
4761 IPW_ERROR
4762 ("Calibration of wrong size %d (should be %zd)\n",
4763 notif->size, sizeof(*x));
4764 break;
4765 }
4766
4767 case HOST_NOTIFICATION_NOISE_STATS:{
4768 if (notif->size == sizeof(u32)) {
4769 priv->exp_avg_noise =
4770 exponential_average(priv->exp_avg_noise,
4771 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4772 DEPTH_NOISE);
4773 break;
4774 }
4775
4776 IPW_ERROR
4777 ("Noise stat is wrong size %d (should be %zd)\n",
4778 notif->size, sizeof(u32));
4779 break;
4780 }
4781
4782 default:
4783 IPW_DEBUG_NOTIF("Unknown notification: "
4784 "subtype=%d,flags=0x%2x,size=%d\n",
4785 notif->subtype, notif->flags, notif->size);
4786 }
4787 }
4788
4789 /**
4790  * Destroys all DMA structures and initialises them again
4791 *
4792 * @param priv
4793 * @return error code
4794 */
4795 static int ipw_queue_reset(struct ipw_priv *priv)
4796 {
4797 int rc = 0;
4798 /** @todo customize queue sizes */
4799 int nTx = 64, nTxCmd = 8;
4800 ipw_tx_queue_free(priv);
4801 /* Tx CMD queue */
4802 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4803 IPW_TX_CMD_QUEUE_READ_INDEX,
4804 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4805 IPW_TX_CMD_QUEUE_BD_BASE,
4806 IPW_TX_CMD_QUEUE_BD_SIZE);
4807 if (rc) {
4808 IPW_ERROR("Tx Cmd queue init failed\n");
4809 goto error;
4810 }
4811 /* Tx queue(s) */
4812 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4813 IPW_TX_QUEUE_0_READ_INDEX,
4814 IPW_TX_QUEUE_0_WRITE_INDEX,
4815 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4816 if (rc) {
4817 IPW_ERROR("Tx 0 queue init failed\n");
4818 goto error;
4819 }
4820 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4821 IPW_TX_QUEUE_1_READ_INDEX,
4822 IPW_TX_QUEUE_1_WRITE_INDEX,
4823 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4824 if (rc) {
4825 IPW_ERROR("Tx 1 queue init failed\n");
4826 goto error;
4827 }
4828 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4829 IPW_TX_QUEUE_2_READ_INDEX,
4830 IPW_TX_QUEUE_2_WRITE_INDEX,
4831 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4832 if (rc) {
4833 IPW_ERROR("Tx 2 queue init failed\n");
4834 goto error;
4835 }
4836 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4837 IPW_TX_QUEUE_3_READ_INDEX,
4838 IPW_TX_QUEUE_3_WRITE_INDEX,
4839 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4840 if (rc) {
4841 IPW_ERROR("Tx 3 queue init failed\n");
4842 goto error;
4843 }
4844 /* statistics */
4845 priv->rx_bufs_min = 0;
4846 priv->rx_pend_max = 0;
4847 return rc;
4848
4849 error:
4850 ipw_tx_queue_free(priv);
4851 return rc;
4852 }
4853
4854 /**
4855  * Reclaim Tx queue entries no longer used by the NIC.
4856  *
4857  * When the FW advances the 'R' index, all entries between the old and
4858  * new 'R' index need to be reclaimed. As a result, some free space
4859  * forms. If there is enough free space (> low mark), wake the Tx queue.
4860  *
4861  * @note Need to protect against garbage in 'R' index
4862  * @param priv
4863  * @param txq
4864  * @param qindex
4865  * @return Number of used entries remaining in the queue
4866 */
4867 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4868 struct clx2_tx_queue *txq, int qindex)
4869 {
4870 u32 hw_tail;
4871 int used;
4872 struct clx2_queue *q = &txq->q;
4873
4874 hw_tail = ipw_read32(priv, q->reg_r);
4875 if (hw_tail >= q->n_bd) {
4876 IPW_ERROR
4877 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4878 hw_tail, q->n_bd);
4879 goto done;
4880 }
4881 for (; q->last_used != hw_tail;
4882 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4883 ipw_queue_tx_free_tfd(priv, txq);
4884 priv->tx_packets++;
4885 }
4886 done:
4887 if ((ipw_queue_space(q) > q->low_mark) &&
4888 (qindex >= 0) &&
4889 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4890 netif_wake_queue(priv->net_dev);
4891 used = q->first_empty - q->last_used;
4892 if (used < 0)
4893 used += q->n_bd;
4894
4895 return used;
4896 }
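/*
 * The "used" computation above is plain ring-buffer occupancy.  Worked
 * example (illustrative): with n_bd == 64, first_empty == 3 and
 * last_used == 60, the write pointer has wrapped:
 */
#if 0
	int used = 3 - 60;	/* -57 */

	if (used < 0)
		used += 64;	/* 7 TFDs still outstanding */
#endif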
4897
4898 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4899 int len, int sync)
4900 {
4901 struct clx2_tx_queue *txq = &priv->txq_cmd;
4902 struct clx2_queue *q = &txq->q;
4903 struct tfd_frame *tfd;
4904
4905 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4906 IPW_ERROR("No space for Tx\n");
4907 return -EBUSY;
4908 }
4909
4910 tfd = &txq->bd[q->first_empty];
4911 txq->txb[q->first_empty] = NULL;
4912
4913 memset(tfd, 0, sizeof(*tfd));
4914 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4915 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4916 priv->hcmd_seq++;
4917 tfd->u.cmd.index = hcmd;
4918 tfd->u.cmd.length = len;
4919 memcpy(tfd->u.cmd.payload, buf, len);
4920 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4921 ipw_write32(priv, q->reg_w, q->first_empty);
4922 _ipw_read32(priv, 0x90);
4923
4924 return 0;
4925 }
4926
4927 /*
4928 * Rx theory of operation
4929 *
4930 * The host allocates 32 DMA target addresses and passes the host address
4931 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4932 * 0 to 31
4933 *
4934 * Rx Queue Indexes
4935 * The host/firmware share two index registers for managing the Rx buffers.
4936 *
4937 * The READ index maps to the first position that the firmware may be writing
4938 * to -- the driver can read up to (but not including) this position and get
4939 * good data.
4940 * The READ index is managed by the firmware once the card is enabled.
4941 *
4942 * The WRITE index maps to the last position the driver has read from -- the
4943 * position preceding WRITE is the last slot the firmware can place a packet.
4944 *
4945 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4946 * WRITE = READ.
4947 *
4948 * During initialization the host sets up the READ queue position to the first
4949 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4950 *
4951 * When the firmware places a packet in a buffer it will advance the READ index
4952 * and fire the RX interrupt. The driver can then query the READ index and
4953 * process as many packets as possible, moving the WRITE index forward as it
4954 * resets the Rx queue buffers with new memory.
4955 *
4956 * The management in the driver is as follows:
4957 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4958 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4959  * to replenish the ipw->rxq->rx_free.
4960 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4961 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4962 * 'processed' and 'read' driver indexes as well)
4963 * + A received packet is processed and handed to the kernel network stack,
4964 * detached from the ipw->rxq. The driver 'processed' index is updated.
4965 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4966 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4967 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4968 * were enough free buffers and RX_STALLED is set it is cleared.
4969 *
4970 *
4971 * Driver sequence:
4972 *
4973 * ipw_rx_queue_alloc() Allocates rx_free
4974 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4975 * ipw_rx_queue_restock
4976 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4977 * queue, updates firmware pointers, and updates
4978 * the WRITE index. If insufficient rx_free buffers
4979 * are available, schedules ipw_rx_queue_replenish
4980 *
4981 * -- enable interrupts --
4982 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4983 * READ INDEX, detaching the SKB from the pool.
4984 * Moves the packet buffer from queue to rx_used.
4985 * Calls ipw_rx_queue_restock to refill any empty
4986 * slots.
4987 * ...
4988 *
4989 */
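/*
 * Putting concrete numbers on the index convention above (assuming, for
 * illustration, RX_QUEUE_SIZE == 32): READ == 5 with WRITE == 4 means no
 * completed packets are waiting (empty), while READ == 5 with WRITE == 5
 * means the firmware has nowhere left to write (full).  One consistent
 * way to express the room left for the firmware:
 */
#if 0
static inline u32 rx_slots_for_firmware(u32 read, u32 write)
{
	/* 31 slots when empty (one slot is always sacrificed), 0 when full */
	return (write - read + RX_QUEUE_SIZE) % RX_QUEUE_SIZE;
}
#endif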
4990
4991 /*
4992 * If there are slots in the RX queue that need to be restocked,
4993 * and we have free pre-allocated buffers, fill the ranks as much
4994 * as we can pulling from rx_free.
4995 *
4996 * This moves the 'write' index forward to catch up with 'processed', and
4997 * also updates the memory address in the firmware to reference the new
4998 * target buffer.
4999 */
5000 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5001 {
5002 struct ipw_rx_queue *rxq = priv->rxq;
5003 struct list_head *element;
5004 struct ipw_rx_mem_buffer *rxb;
5005 unsigned long flags;
5006 int write;
5007
5008 spin_lock_irqsave(&rxq->lock, flags);
5009 write = rxq->write;
5010 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5011 element = rxq->rx_free.next;
5012 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5013 list_del(element);
5014
5015 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5016 rxb->dma_addr);
5017 rxq->queue[rxq->write] = rxb;
5018 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5019 rxq->free_count--;
5020 }
5021 spin_unlock_irqrestore(&rxq->lock, flags);
5022
5023 /* If the pre-allocated buffer pool is dropping low, schedule to
5024 * refill it */
5025 if (rxq->free_count <= RX_LOW_WATERMARK)
5026 queue_work(priv->workqueue, &priv->rx_replenish);
5027
5028 /* If we've added more space for the firmware to place data, tell it */
5029 if (write != rxq->write)
5030 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5031 }
5032
5033 /*
5034  * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5035  * Also restock the Rx queue via ipw_rx_queue_restock.
5036  *
5037  * This is called as a scheduled work item (except during initialization).
5038 */
5039 static void ipw_rx_queue_replenish(void *data)
5040 {
5041 struct ipw_priv *priv = data;
5042 struct ipw_rx_queue *rxq = priv->rxq;
5043 struct list_head *element;
5044 struct ipw_rx_mem_buffer *rxb;
5045 unsigned long flags;
5046
5047 spin_lock_irqsave(&rxq->lock, flags);
5048 while (!list_empty(&rxq->rx_used)) {
5049 element = rxq->rx_used.next;
5050 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5051 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5052 if (!rxb->skb) {
5053 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5054 priv->net_dev->name);
5055 /* We don't reschedule replenish work here -- we will
5056 * call the restock method and if it still needs
5057 * more buffers it will schedule replenish */
5058 break;
5059 }
5060 list_del(element);
5061
5062 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
5063 rxb->dma_addr =
5064 pci_map_single(priv->pci_dev, rxb->skb->data,
5065 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5066
5067 list_add_tail(&rxb->list, &rxq->rx_free);
5068 rxq->free_count++;
5069 }
5070 spin_unlock_irqrestore(&rxq->lock, flags);
5071
5072 ipw_rx_queue_restock(priv);
5073 }
5074
5075 static void ipw_bg_rx_queue_replenish(void *data)
5076 {
5077 struct ipw_priv *priv = data;
5078 mutex_lock(&priv->mutex);
5079 ipw_rx_queue_replenish(data);
5080 mutex_unlock(&priv->mutex);
5081 }
5082
5083 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5084  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5085  * This free routine walks the list of POOL entries, and if SKB is set to
5086  * non-NULL it is unmapped and freed.
5087 */
5088 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5089 {
5090 int i;
5091
5092 if (!rxq)
5093 return;
5094
5095 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5096 if (rxq->pool[i].skb != NULL) {
5097 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5098 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5099 dev_kfree_skb(rxq->pool[i].skb);
5100 }
5101 }
5102
5103 kfree(rxq);
5104 }
5105
5106 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5107 {
5108 struct ipw_rx_queue *rxq;
5109 int i;
5110
5111 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5112 if (unlikely(!rxq)) {
5113 IPW_ERROR("memory allocation failed\n");
5114 return NULL;
5115 }
5116 spin_lock_init(&rxq->lock);
5117 INIT_LIST_HEAD(&rxq->rx_free);
5118 INIT_LIST_HEAD(&rxq->rx_used);
5119
5120 /* Fill the rx_used queue with _all_ of the Rx buffers */
5121 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5122 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5123
5124 /* Set us so that we have processed and used all buffers, but have
5125 * not restocked the Rx queue with fresh buffers */
5126 rxq->read = rxq->write = 0;
5127 rxq->processed = RX_QUEUE_SIZE - 1;
5128 rxq->free_count = 0;
5129
5130 return rxq;
5131 }
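/*
 * A note on the initial state chosen above: read == write == 0 with
 * processed == RX_QUEUE_SIZE - 1 means the restock loop (which runs while
 * write != processed) can hand at most RX_QUEUE_SIZE - 1 buffers to the
 * firmware, deliberately leaving one slot unused so that "full" and
 * "empty" remain distinguishable (see the Rx theory of operation above).
 */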
5132
5133 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5134 {
5135 rate &= ~IEEE80211_BASIC_RATE_MASK;
5136 if (ieee_mode == IEEE_A) {
5137 switch (rate) {
5138 case IEEE80211_OFDM_RATE_6MB:
5139 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5140 1 : 0;
5141 case IEEE80211_OFDM_RATE_9MB:
5142 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5143 1 : 0;
5144 case IEEE80211_OFDM_RATE_12MB:
5145 return priv->
5146 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5147 case IEEE80211_OFDM_RATE_18MB:
5148 return priv->
5149 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5150 case IEEE80211_OFDM_RATE_24MB:
5151 return priv->
5152 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5153 case IEEE80211_OFDM_RATE_36MB:
5154 return priv->
5155 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5156 case IEEE80211_OFDM_RATE_48MB:
5157 return priv->
5158 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5159 case IEEE80211_OFDM_RATE_54MB:
5160 return priv->
5161 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5162 default:
5163 return 0;
5164 }
5165 }
5166
5167 /* B and G mixed */
5168 switch (rate) {
5169 case IEEE80211_CCK_RATE_1MB:
5170 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5171 case IEEE80211_CCK_RATE_2MB:
5172 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5173 case IEEE80211_CCK_RATE_5MB:
5174 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5175 case IEEE80211_CCK_RATE_11MB:
5176 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5177 }
5178
5179 /* If we are limited to B modulations, bail at this point */
5180 if (ieee_mode == IEEE_B)
5181 return 0;
5182
5183 /* G */
5184 switch (rate) {
5185 case IEEE80211_OFDM_RATE_6MB:
5186 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5187 case IEEE80211_OFDM_RATE_9MB:
5188 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5189 case IEEE80211_OFDM_RATE_12MB:
5190 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5191 case IEEE80211_OFDM_RATE_18MB:
5192 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5193 case IEEE80211_OFDM_RATE_24MB:
5194 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5195 case IEEE80211_OFDM_RATE_36MB:
5196 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5197 case IEEE80211_OFDM_RATE_48MB:
5198 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5199 case IEEE80211_OFDM_RATE_54MB:
5200 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5201 }
5202
5203 return 0;
5204 }
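/*
 * For illustration: supported-rate bytes from beacons and probe responses
 * carry the rate in 500 kb/s units, with bit 7 flagging a "basic"
 * (mandatory) rate -- that is the bit IEEE80211_BASIC_RATE_MASK strips
 * above.  E.g. a raw byte of 0x96 (basic 11 Mb/s) becomes 0x16 and is
 * then checked against IEEE80211_CCK_RATE_11MB_MASK in priv->rates_mask.
 */
#if 0
	u8 raw = 0x96;					/* basic 11 Mb/s */
	u8 rate = raw & ~IEEE80211_BASIC_RATE_MASK;	/* 0x16 == 11 Mb/s */
#endif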
5205
5206 static int ipw_compatible_rates(struct ipw_priv *priv,
5207 const struct ieee80211_network *network,
5208 struct ipw_supported_rates *rates)
5209 {
5210 int num_rates, i;
5211
5212 memset(rates, 0, sizeof(*rates));
5213 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5214 rates->num_rates = 0;
5215 for (i = 0; i < num_rates; i++) {
5216 if (!ipw_is_rate_in_mask(priv, network->mode,
5217 network->rates[i])) {
5218
5219 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5220 IPW_DEBUG_SCAN("Adding masked mandatory "
5221 "rate %02X\n",
5222 network->rates[i]);
5223 rates->supported_rates[rates->num_rates++] =
5224 network->rates[i];
5225 continue;
5226 }
5227
5228 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5229 network->rates[i], priv->rates_mask);
5230 continue;
5231 }
5232
5233 rates->supported_rates[rates->num_rates++] = network->rates[i];
5234 }
5235
5236 num_rates = min(network->rates_ex_len,
5237 (u8) (IPW_MAX_RATES - num_rates));
5238 for (i = 0; i < num_rates; i++) {
5239 if (!ipw_is_rate_in_mask(priv, network->mode,
5240 network->rates_ex[i])) {
5241 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5242 IPW_DEBUG_SCAN("Adding masked mandatory "
5243 "rate %02X\n",
5244 network->rates_ex[i]);
5245 rates->supported_rates[rates->num_rates++] =
5246 				    network->rates_ex[i];
5247 continue;
5248 }
5249
5250 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5251 network->rates_ex[i], priv->rates_mask);
5252 continue;
5253 }
5254
5255 rates->supported_rates[rates->num_rates++] =
5256 network->rates_ex[i];
5257 }
5258
5259 return 1;
5260 }
5261
5262 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5263 const struct ipw_supported_rates *src)
5264 {
5265 u8 i;
5266 for (i = 0; i < src->num_rates; i++)
5267 dest->supported_rates[i] = src->supported_rates[i];
5268 dest->num_rates = src->num_rates;
5269 }
5270
5271 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5272 * mask should ever be used -- right now all callers to add the scan rates are
5273 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5274 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5275 u8 modulation, u32 rate_mask)
5276 {
5277 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5278 IEEE80211_BASIC_RATE_MASK : 0;
5279
5280 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5281 rates->supported_rates[rates->num_rates++] =
5282 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5283
5284 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5285 rates->supported_rates[rates->num_rates++] =
5286 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5287
5288 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5289 rates->supported_rates[rates->num_rates++] = basic_mask |
5290 IEEE80211_CCK_RATE_5MB;
5291
5292 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5293 rates->supported_rates[rates->num_rates++] = basic_mask |
5294 IEEE80211_CCK_RATE_11MB;
5295 }
5296
5297 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5298 u8 modulation, u32 rate_mask)
5299 {
5300 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5301 IEEE80211_BASIC_RATE_MASK : 0;
5302
5303 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5304 rates->supported_rates[rates->num_rates++] = basic_mask |
5305 IEEE80211_OFDM_RATE_6MB;
5306
5307 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5308 rates->supported_rates[rates->num_rates++] =
5309 IEEE80211_OFDM_RATE_9MB;
5310
5311 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5312 rates->supported_rates[rates->num_rates++] = basic_mask |
5313 IEEE80211_OFDM_RATE_12MB;
5314
5315 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5316 rates->supported_rates[rates->num_rates++] =
5317 IEEE80211_OFDM_RATE_18MB;
5318
5319 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5320 rates->supported_rates[rates->num_rates++] = basic_mask |
5321 IEEE80211_OFDM_RATE_24MB;
5322
5323 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5324 rates->supported_rates[rates->num_rates++] =
5325 IEEE80211_OFDM_RATE_36MB;
5326
5327 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5328 rates->supported_rates[rates->num_rates++] =
5329 IEEE80211_OFDM_RATE_48MB;
5330
5331 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5332 rates->supported_rates[rates->num_rates++] =
5333 IEEE80211_OFDM_RATE_54MB;
5334 }
5335
5336 struct ipw_network_match {
5337 struct ieee80211_network *network;
5338 struct ipw_supported_rates rates;
5339 };
5340
5341 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5342 struct ipw_network_match *match,
5343 struct ieee80211_network *network,
5344 int roaming)
5345 {
5346 struct ipw_supported_rates rates;
5347
5348 /* Verify that this network's capability is compatible with the
5349 * current mode (AdHoc or Infrastructure) */
5350 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5351 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5352 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5353 "capability mismatch.\n",
5354 escape_essid(network->ssid, network->ssid_len),
5355 MAC_ARG(network->bssid));
5356 return 0;
5357 }
5358
5359 /* If we do not have an ESSID for this AP, we can not associate with
5360 * it */
5361 if (network->flags & NETWORK_EMPTY_ESSID) {
5362 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5363 "because of hidden ESSID.\n",
5364 escape_essid(network->ssid, network->ssid_len),
5365 MAC_ARG(network->bssid));
5366 return 0;
5367 }
5368
5369 if (unlikely(roaming)) {
5370 		/* If we are roaming, then check whether this is a valid
5371 		 * network to try to roam to */
5372 if ((network->ssid_len != match->network->ssid_len) ||
5373 memcmp(network->ssid, match->network->ssid,
5374 network->ssid_len)) {
5375 			IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5376 "because of non-network ESSID.\n",
5377 escape_essid(network->ssid,
5378 network->ssid_len),
5379 MAC_ARG(network->bssid));
5380 return 0;
5381 }
5382 } else {
5383 /* If an ESSID has been configured then compare the broadcast
5384 * ESSID to ours */
5385 if ((priv->config & CFG_STATIC_ESSID) &&
5386 ((network->ssid_len != priv->essid_len) ||
5387 memcmp(network->ssid, priv->essid,
5388 min(network->ssid_len, priv->essid_len)))) {
5389 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5390
5391 strncpy(escaped,
5392 escape_essid(network->ssid, network->ssid_len),
5393 sizeof(escaped));
5394 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5395 "because of ESSID mismatch: '%s'.\n",
5396 escaped, MAC_ARG(network->bssid),
5397 escape_essid(priv->essid,
5398 priv->essid_len));
5399 return 0;
5400 }
5401 }
5402
5403 	/* If the currently matched network has a larger TSF timestamp than
5404 	 * this one, don't bother testing everything else. */
5405
5406 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5407 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5408 "current network.\n",
5409 escape_essid(match->network->ssid,
5410 match->network->ssid_len));
5411 return 0;
5412 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5413 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5414 "current network.\n",
5415 escape_essid(match->network->ssid,
5416 match->network->ssid_len));
5417 return 0;
5418 }
5419
5420 /* Now go through and see if the requested network is valid... */
5421 if (priv->ieee->scan_age != 0 &&
5422 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5423 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5424 "because of age: %ums.\n",
5425 escape_essid(network->ssid, network->ssid_len),
5426 MAC_ARG(network->bssid),
5427 jiffies_to_msecs(jiffies -
5428 network->last_scanned));
5429 return 0;
5430 }
5431
5432 if ((priv->config & CFG_STATIC_CHANNEL) &&
5433 (network->channel != priv->channel)) {
5434 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5435 "because of channel mismatch: %d != %d.\n",
5436 escape_essid(network->ssid, network->ssid_len),
5437 MAC_ARG(network->bssid),
5438 network->channel, priv->channel);
5439 return 0;
5440 }
5441
5442 	/* Verify privacy compatibility */
5443 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5444 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5445 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5446 "because of privacy mismatch: %s != %s.\n",
5447 escape_essid(network->ssid, network->ssid_len),
5448 MAC_ARG(network->bssid),
5449 priv->
5450 capability & CAP_PRIVACY_ON ? "on" : "off",
5451 network->
5452 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5453 "off");
5454 return 0;
5455 }
5456
5457 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5458 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5459 "because of the same BSSID match: " MAC_FMT
5460 ".\n", escape_essid(network->ssid,
5461 network->ssid_len),
5462 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5463 return 0;
5464 }
5465
5466 /* Filter out any incompatible freq / mode combinations */
5467 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5468 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5469 "because of invalid frequency/mode "
5470 "combination.\n",
5471 escape_essid(network->ssid, network->ssid_len),
5472 MAC_ARG(network->bssid));
5473 return 0;
5474 }
5475
5476 /* Ensure that the rates supported by the driver are compatible with
5477 * this AP, including verification of basic rates (mandatory) */
5478 if (!ipw_compatible_rates(priv, network, &rates)) {
5479 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5480 "because configured rate mask excludes "
5481 "AP mandatory rate.\n",
5482 escape_essid(network->ssid, network->ssid_len),
5483 MAC_ARG(network->bssid));
5484 return 0;
5485 }
5486
5487 if (rates.num_rates == 0) {
5488 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5489 "because of no compatible rates.\n",
5490 escape_essid(network->ssid, network->ssid_len),
5491 MAC_ARG(network->bssid));
5492 return 0;
5493 }
5494
5495 	/* TODO: Perform any further minimal comparative tests. We do not
5496 * want to put too much policy logic here; intelligent scan selection
5497 * should occur within a generic IEEE 802.11 user space tool. */
5498
5499 /* Set up 'new' AP to this network */
5500 ipw_copy_rates(&match->rates, &rates);
5501 match->network = network;
5502 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5503 escape_essid(network->ssid, network->ssid_len),
5504 MAC_ARG(network->bssid));
5505
5506 return 1;
5507 }
5508
5509 static void ipw_merge_adhoc_network(void *data)
5510 {
5511 struct ipw_priv *priv = data;
5512 struct ieee80211_network *network = NULL;
5513 struct ipw_network_match match = {
5514 .network = priv->assoc_network
5515 };
5516
5517 if ((priv->status & STATUS_ASSOCIATED) &&
5518 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5519 /* First pass through ROAM process -- look for a better
5520 * network */
5521 unsigned long flags;
5522
5523 spin_lock_irqsave(&priv->ieee->lock, flags);
5524 list_for_each_entry(network, &priv->ieee->network_list, list) {
5525 if (network != priv->assoc_network)
5526 ipw_find_adhoc_network(priv, &match, network,
5527 1);
5528 }
5529 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5530
5531 if (match.network == priv->assoc_network) {
5532 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5533 "merge to.\n");
5534 return;
5535 }
5536
5537 mutex_lock(&priv->mutex);
5538 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5539 IPW_DEBUG_MERGE("remove network %s\n",
5540 escape_essid(priv->essid,
5541 priv->essid_len));
5542 ipw_remove_current_network(priv);
5543 }
5544
5545 ipw_disassociate(priv);
5546 priv->assoc_network = match.network;
5547 mutex_unlock(&priv->mutex);
5548 return;
5549 }
5550 }
5551
5552 static int ipw_best_network(struct ipw_priv *priv,
5553 struct ipw_network_match *match,
5554 struct ieee80211_network *network, int roaming)
5555 {
5556 struct ipw_supported_rates rates;
5557
5558 /* Verify that this network's capability is compatible with the
5559 * current mode (AdHoc or Infrastructure) */
5560 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5561 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5562 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5563 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5564 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5565 "capability mismatch.\n",
5566 escape_essid(network->ssid, network->ssid_len),
5567 MAC_ARG(network->bssid));
5568 return 0;
5569 }
5570
5571 /* If we do not have an ESSID for this AP, we can not associate with
5572 * it */
5573 if (network->flags & NETWORK_EMPTY_ESSID) {
5574 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5575 "because of hidden ESSID.\n",
5576 escape_essid(network->ssid, network->ssid_len),
5577 MAC_ARG(network->bssid));
5578 return 0;
5579 }
5580
5581 if (unlikely(roaming)) {
5582 		/* If we are roaming, then check whether this is a valid
5583 		 * network to try to roam to */
5584 if ((network->ssid_len != match->network->ssid_len) ||
5585 memcmp(network->ssid, match->network->ssid,
5586 network->ssid_len)) {
5587 			IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5588 "because of non-network ESSID.\n",
5589 escape_essid(network->ssid,
5590 network->ssid_len),
5591 MAC_ARG(network->bssid));
5592 return 0;
5593 }
5594 } else {
5595 /* If an ESSID has been configured then compare the broadcast
5596 * ESSID to ours */
5597 if ((priv->config & CFG_STATIC_ESSID) &&
5598 ((network->ssid_len != priv->essid_len) ||
5599 memcmp(network->ssid, priv->essid,
5600 min(network->ssid_len, priv->essid_len)))) {
5601 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5602 strncpy(escaped,
5603 escape_essid(network->ssid, network->ssid_len),
5604 sizeof(escaped));
5605 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5606 "because of ESSID mismatch: '%s'.\n",
5607 escaped, MAC_ARG(network->bssid),
5608 escape_essid(priv->essid,
5609 priv->essid_len));
5610 return 0;
5611 }
5612 }
5613
5614 	/* If the old network's signal (RSSI) is stronger than this one's,
5615 	 * don't bother testing everything else. */
5616 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5617 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5618 strncpy(escaped,
5619 escape_essid(network->ssid, network->ssid_len),
5620 sizeof(escaped));
5621 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5622 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5623 escaped, MAC_ARG(network->bssid),
5624 escape_essid(match->network->ssid,
5625 match->network->ssid_len),
5626 MAC_ARG(match->network->bssid));
5627 return 0;
5628 }
5629
5630 /* If this network has already had an association attempt within the
5631 * last 3 seconds, do not try and associate again... */
5632 if (network->last_associate &&
5633 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5634 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5635 "because of storming (%ums since last "
5636 "assoc attempt).\n",
5637 escape_essid(network->ssid, network->ssid_len),
5638 MAC_ARG(network->bssid),
5639 jiffies_to_msecs(jiffies -
5640 network->last_associate));
5641 return 0;
5642 }
5643
5644 /* Now go through and see if the requested network is valid... */
5645 if (priv->ieee->scan_age != 0 &&
5646 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5647 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5648 "because of age: %ums.\n",
5649 escape_essid(network->ssid, network->ssid_len),
5650 MAC_ARG(network->bssid),
5651 jiffies_to_msecs(jiffies -
5652 network->last_scanned));
5653 return 0;
5654 }
5655
5656 if ((priv->config & CFG_STATIC_CHANNEL) &&
5657 (network->channel != priv->channel)) {
5658 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5659 "because of channel mismatch: %d != %d.\n",
5660 escape_essid(network->ssid, network->ssid_len),
5661 MAC_ARG(network->bssid),
5662 network->channel, priv->channel);
5663 return 0;
5664 }
5665
5666 /* Verify privacy compatibility */
5667 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5668 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5669 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5670 "because of privacy mismatch: %s != %s.\n",
5671 escape_essid(network->ssid, network->ssid_len),
5672 MAC_ARG(network->bssid),
5673 priv->capability & CAP_PRIVACY_ON ? "on" :
5674 "off",
5675 network->capability &
5676 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5677 return 0;
5678 }
5679
5680 if ((priv->config & CFG_STATIC_BSSID) &&
5681 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5682 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5683 "because of BSSID mismatch: " MAC_FMT ".\n",
5684 escape_essid(network->ssid, network->ssid_len),
5685 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5686 return 0;
5687 }
5688
5689 /* Filter out any incompatible freq / mode combinations */
5690 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5691 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5692 "because of invalid frequency/mode "
5693 "combination.\n",
5694 escape_essid(network->ssid, network->ssid_len),
5695 MAC_ARG(network->bssid));
5696 return 0;
5697 }
5698
5699 /* Filter out invalid channel in current GEO */
5700 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5701 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5702 "because of invalid channel in current GEO\n",
5703 escape_essid(network->ssid, network->ssid_len),
5704 MAC_ARG(network->bssid));
5705 return 0;
5706 }
5707
5708 /* Ensure that the rates supported by the driver are compatible with
5709 * this AP, including verification of basic rates (mandatory) */
5710 if (!ipw_compatible_rates(priv, network, &rates)) {
5711 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5712 "because configured rate mask excludes "
5713 "AP mandatory rate.\n",
5714 escape_essid(network->ssid, network->ssid_len),
5715 MAC_ARG(network->bssid));
5716 return 0;
5717 }
5718
5719 if (rates.num_rates == 0) {
5720 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5721 "because of no compatible rates.\n",
5722 escape_essid(network->ssid, network->ssid_len),
5723 MAC_ARG(network->bssid));
5724 return 0;
5725 }
5726
5727 /* TODO: Perform any further minimal comparative tests. We do not
5728 * want to put too much policy logic here; intelligent scan selection
5729 * should occur within a generic IEEE 802.11 user space tool. */
5730
5731 /* Set up 'new' AP to this network */
5732 ipw_copy_rates(&match->rates, &rates);
5733 match->network = network;
5734
5735 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5736 escape_essid(network->ssid, network->ssid_len),
5737 MAC_ARG(network->bssid));
5738
5739 return 1;
5740 }
5741
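/*
 * Fill in an ieee80211_network template for the IBSS we are about to
 * create: pick a band and channel that are valid for the current GEO
 * (falling back to the first usable channel if necessary), obtain a BSSID
 * from ipw_create_bssid(), copy in the configured ESSID and supported
 * rates, and set the default beacon interval, listen interval and ATIM
 * window used by the subsequent association request.
 */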
5742 static void ipw_adhoc_create(struct ipw_priv *priv,
5743 struct ieee80211_network *network)
5744 {
5745 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5746 int i;
5747
5748 /*
5749 * For the purposes of scanning, we can set our wireless mode
5750 * to trigger scans across combinations of bands, but when it
5751 * comes to creating a new ad-hoc network, we have to tell the FW
5752 * exactly which band to use.
5753 *
5754 * We also have the possibility of an invalid channel for the
5755 * chosen band. Attempting to create a new ad-hoc network
5756 * with an invalid channel for wireless mode will trigger a
5757 * FW fatal error.
5758 *
5759 */
5760 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5761 case IEEE80211_52GHZ_BAND:
5762 network->mode = IEEE_A;
5763 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5764 BUG_ON(i == -1);
5765 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5766 IPW_WARNING("Overriding invalid channel\n");
5767 priv->channel = geo->a[0].channel;
5768 }
5769 break;
5770
5771 case IEEE80211_24GHZ_BAND:
5772 if (priv->ieee->mode & IEEE_G)
5773 network->mode = IEEE_G;
5774 else
5775 network->mode = IEEE_B;
5776 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5777 BUG_ON(i == -1);
5778 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5779 IPW_WARNING("Overriding invalid channel\n");
5780 priv->channel = geo->bg[0].channel;
5781 }
5782 break;
5783
5784 default:
5785 IPW_WARNING("Overriding invalid channel\n");
5786 if (priv->ieee->mode & IEEE_A) {
5787 network->mode = IEEE_A;
5788 priv->channel = geo->a[0].channel;
5789 } else if (priv->ieee->mode & IEEE_G) {
5790 network->mode = IEEE_G;
5791 priv->channel = geo->bg[0].channel;
5792 } else {
5793 network->mode = IEEE_B;
5794 priv->channel = geo->bg[0].channel;
5795 }
5796 break;
5797 }
5798
5799 network->channel = priv->channel;
5800 priv->config |= CFG_ADHOC_PERSIST;
5801 ipw_create_bssid(priv, network->bssid);
5802 network->ssid_len = priv->essid_len;
5803 memcpy(network->ssid, priv->essid, priv->essid_len);
5804 memset(&network->stats, 0, sizeof(network->stats));
5805 network->capability = WLAN_CAPABILITY_IBSS;
5806 if (!(priv->config & CFG_PREAMBLE_LONG))
5807 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5808 if (priv->capability & CAP_PRIVACY_ON)
5809 network->capability |= WLAN_CAPABILITY_PRIVACY;
5810 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5811 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5812 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5813 memcpy(network->rates_ex,
5814 &priv->rates.supported_rates[network->rates_len],
5815 network->rates_ex_len);
5816 network->last_scanned = 0;
5817 network->flags = 0;
5818 network->last_associate = 0;
5819 network->time_stamp[0] = 0;
5820 network->time_stamp[1] = 0;
5821 network->beacon_interval = 100; /* Default */
5822 network->listen_interval = 10; /* Default */
5823 network->atim_window = 0; /* Default */
5824 network->wpa_ie_len = 0;
5825 network->rsn_ie_len = 0;
5826 }
5827
5828 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5829 {
5830 struct ipw_tgi_tx_key key;
5831
5832 if (!(priv->ieee->sec.flags & (1 << index)))
5833 return;
5834
5835 key.key_id = index;
5836 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5837 key.security_type = type;
5838 key.station_index = 0; /* always 0 for BSS */
5839 key.flags = 0;
5840 /* 0 for new key; previous value of counter (after fatal error) */
5841 key.tx_counter[0] = 0;
5842 key.tx_counter[1] = 0;
5843
5844 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5845 }
5846
5847 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5848 {
5849 struct ipw_wep_key key;
5850 int i;
5851
5852 key.cmd_id = DINO_CMD_WEP_KEY;
5853 key.seq_num = 0;
5854
5855 /* Note: AES keys cannot be set multiple times.
5856 * Only set them the first time. */
5857 for (i = 0; i < 4; i++) {
5858 key.key_index = i | type;
5859 if (!(priv->ieee->sec.flags & (1 << i))) {
5860 key.key_size = 0;
5861 continue;
5862 }
5863
5864 key.key_size = priv->ieee->sec.key_sizes[i];
5865 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5866
5867 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5868 }
5869 }
5870
5871 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5872 {
5873 if (priv->ieee->host_encrypt)
5874 return;
5875
5876 switch (level) {
5877 case SEC_LEVEL_3:
5878 priv->sys_config.disable_unicast_decryption = 0;
5879 priv->ieee->host_decrypt = 0;
5880 break;
5881 case SEC_LEVEL_2:
5882 priv->sys_config.disable_unicast_decryption = 1;
5883 priv->ieee->host_decrypt = 1;
5884 break;
5885 case SEC_LEVEL_1:
5886 priv->sys_config.disable_unicast_decryption = 0;
5887 priv->ieee->host_decrypt = 0;
5888 break;
5889 case SEC_LEVEL_0:
5890 priv->sys_config.disable_unicast_decryption = 1;
5891 break;
5892 default:
5893 break;
5894 }
5895 }
5896
5897 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5898 {
5899 if (priv->ieee->host_encrypt)
5900 return;
5901
5902 switch (level) {
5903 case SEC_LEVEL_3:
5904 priv->sys_config.disable_multicast_decryption = 0;
5905 break;
5906 case SEC_LEVEL_2:
5907 priv->sys_config.disable_multicast_decryption = 1;
5908 break;
5909 case SEC_LEVEL_1:
5910 priv->sys_config.disable_multicast_decryption = 0;
5911 break;
5912 case SEC_LEVEL_0:
5913 priv->sys_config.disable_multicast_decryption = 1;
5914 break;
5915 default:
5916 break;
5917 }
5918 }
5919
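/*
 * Push the negotiated keys down to the firmware according to the current
 * security level: SEC_LEVEL_3 (CCMP) loads the active TX key with
 * DCT_FLAG_EXT_SECURITY_CCM and, when multicast decryption is done in
 * hardware, the per-index key slots as well; SEC_LEVEL_2 (TKIP) loads only
 * the active TX key; SEC_LEVEL_1 (WEP) loads the four WEP key slots and
 * enables hardware decryption for both unicast and multicast traffic.
 */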
5920 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5921 {
5922 switch (priv->ieee->sec.level) {
5923 case SEC_LEVEL_3:
5924 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5925 ipw_send_tgi_tx_key(priv,
5926 DCT_FLAG_EXT_SECURITY_CCM,
5927 priv->ieee->sec.active_key);
5928
5929 if (!priv->ieee->host_mc_decrypt)
5930 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5931 break;
5932 case SEC_LEVEL_2:
5933 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5934 ipw_send_tgi_tx_key(priv,
5935 DCT_FLAG_EXT_SECURITY_TKIP,
5936 priv->ieee->sec.active_key);
5937 break;
5938 case SEC_LEVEL_1:
5939 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5940 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5941 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5942 break;
5943 case SEC_LEVEL_0:
5944 default:
5945 break;
5946 }
5947 }
5948
5949 static void ipw_adhoc_check(void *data)
5950 {
5951 struct ipw_priv *priv = data;
5952
5953 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5954 !(priv->config & CFG_ADHOC_PERSIST)) {
5955 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5956 IPW_DL_STATE | IPW_DL_ASSOC,
5957 "Missed beacon: %d - disassociate\n",
5958 priv->missed_adhoc_beacons);
5959 ipw_remove_current_network(priv);
5960 ipw_disassociate(priv);
5961 return;
5962 }
5963
5964 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5965 priv->assoc_request.beacon_interval);
5966 }
5967
5968 static void ipw_bg_adhoc_check(void *data)
5969 {
5970 struct ipw_priv *priv = data;
5971 mutex_lock(&priv->mutex);
5972 ipw_adhoc_check(data);
5973 mutex_unlock(&priv->mutex);
5974 }
5975
5976 #ifdef CONFIG_IPW2200_DEBUG
5977 static void ipw_debug_config(struct ipw_priv *priv)
5978 {
5979 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5980 "[CFG 0x%08X]\n", priv->config);
5981 if (priv->config & CFG_STATIC_CHANNEL)
5982 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5983 else
5984 IPW_DEBUG_INFO("Channel unlocked.\n");
5985 if (priv->config & CFG_STATIC_ESSID)
5986 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5987 escape_essid(priv->essid, priv->essid_len));
5988 else
5989 IPW_DEBUG_INFO("ESSID unlocked.\n");
5990 if (priv->config & CFG_STATIC_BSSID)
5991 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5992 MAC_ARG(priv->bssid));
5993 else
5994 IPW_DEBUG_INFO("BSSID unlocked.\n");
5995 if (priv->capability & CAP_PRIVACY_ON)
5996 IPW_DEBUG_INFO("PRIVACY on\n");
5997 else
5998 IPW_DEBUG_INFO("PRIVACY off\n");
5999 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6000 }
6001 #else
6002 #define ipw_debug_config(x) do {} while (0)
6003 #endif
6004
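/*
 * Translate the user-configured rates_mask into the firmware's per-band
 * fixed-rate word and write it through IPW_MEM_FIXED_OVERRIDE.  For pure
 * 802.11a the OFDM bits are shifted down by IEEE80211_OFDM_SHIFT_MASK_A;
 * for 2.4 GHz (B/G) operation the 6/9/12 Mbit OFDM bits are folded down by
 * one position, apparently to match the firmware's rate table layout (the
 * TODO below notes this path is unverified).
 */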
6005 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6006 {
6007 /* TODO: Verify that this works... */
6008 struct ipw_fixed_rate fr = {
6009 .tx_rates = priv->rates_mask
6010 };
6011 u32 reg;
6012 u16 mask = 0;
6013
6014 /* Identify 'current FW band' and match it with the fixed
6015 * Tx rates */
6016
6017 switch (priv->ieee->freq_band) {
6018 case IEEE80211_52GHZ_BAND: /* A only */
6019 /* IEEE_A */
6020 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6021 /* Invalid fixed rate mask */
6022 IPW_DEBUG_WX
6023 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6024 fr.tx_rates = 0;
6025 break;
6026 }
6027
6028 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6029 break;
6030
6031 default: /* 2.4Ghz or Mixed */
6032 /* IEEE_B */
6033 if (mode == IEEE_B) {
6034 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6035 /* Invalid fixed rate mask */
6036 IPW_DEBUG_WX
6037 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6038 fr.tx_rates = 0;
6039 }
6040 break;
6041 }
6042
6043 /* IEEE_G */
6044 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6045 IEEE80211_OFDM_RATES_MASK)) {
6046 /* Invalid fixed rate mask */
6047 IPW_DEBUG_WX
6048 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6049 fr.tx_rates = 0;
6050 break;
6051 }
6052
6053 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6054 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6055 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6056 }
6057
6058 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6059 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6060 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6061 }
6062
6063 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6064 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6065 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6066 }
6067
6068 fr.tx_rates |= mask;
6069 break;
6070 }
6071
6072 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6073 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6074 }
6075
6076 static void ipw_abort_scan(struct ipw_priv *priv)
6077 {
6078 int err;
6079
6080 if (priv->status & STATUS_SCAN_ABORTING) {
6081 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6082 return;
6083 }
6084 priv->status |= STATUS_SCAN_ABORTING;
6085
6086 err = ipw_send_scan_abort(priv);
6087 if (err)
6088 IPW_DEBUG_HC("Request to abort scan failed.\n");
6089 }
6090
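/*
 * Fill scan->channels_list for every band we support.  The list is encoded
 * as a band header byte followed by that band's channels, e.g. (a sketch;
 * the channel numbers are purely illustrative):
 *
 *   { (IPW_A_MODE << 6) | 3, 36, 40, 44,
 *     (IPW_B_MODE << 6) | 2, 1, 6 }
 *
 * Passive-only channels from the GEO are always scanned with
 * IPW_SCAN_PASSIVE_FULL_DWELL_SCAN regardless of the requested scan_type,
 * and the channel we are currently associated on is skipped.
 */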
6091 static void ipw_add_scan_channels(struct ipw_priv *priv,
6092 struct ipw_scan_request_ext *scan,
6093 int scan_type)
6094 {
6095 int channel_index = 0;
6096 const struct ieee80211_geo *geo;
6097 int i;
6098
6099 geo = ieee80211_get_geo(priv->ieee);
6100
6101 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6102 int start = channel_index;
6103 for (i = 0; i < geo->a_channels; i++) {
6104 if ((priv->status & STATUS_ASSOCIATED) &&
6105 geo->a[i].channel == priv->channel)
6106 continue;
6107 channel_index++;
6108 scan->channels_list[channel_index] = geo->a[i].channel;
6109 ipw_set_scan_type(scan, channel_index,
6110 geo->a[i].
6111 flags & IEEE80211_CH_PASSIVE_ONLY ?
6112 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6113 scan_type);
6114 }
6115
6116 if (start != channel_index) {
6117 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6118 (channel_index - start);
6119 channel_index++;
6120 }
6121 }
6122
6123 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6124 int start = channel_index;
6125 if (priv->config & CFG_SPEED_SCAN) {
6126 int index;
6127 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6128 /* nop out the list */
6129 [0] = 0
6130 };
6131
6132 u8 channel;
6133 while (channel_index < IPW_SCAN_CHANNELS) {
6134 channel =
6135 priv->speed_scan[priv->speed_scan_pos];
6136 if (channel == 0) {
6137 priv->speed_scan_pos = 0;
6138 channel = priv->speed_scan[0];
6139 }
6140 if ((priv->status & STATUS_ASSOCIATED) &&
6141 channel == priv->channel) {
6142 priv->speed_scan_pos++;
6143 continue;
6144 }
6145
6146 /* If this channel has already been
6147 * added to the scan, break out of the loop;
6148 * it will be the first channel
6149 * in the next scan.
6150 */
6151 if (channels[channel - 1] != 0)
6152 break;
6153
6154 channels[channel - 1] = 1;
6155 priv->speed_scan_pos++;
6156 channel_index++;
6157 scan->channels_list[channel_index] = channel;
6158 index =
6159 ieee80211_channel_to_index(priv->ieee, channel);
6160 ipw_set_scan_type(scan, channel_index,
6161 geo->bg[index].
6162 flags &
6163 IEEE80211_CH_PASSIVE_ONLY ?
6164 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6165 : scan_type);
6166 }
6167 } else {
6168 for (i = 0; i < geo->bg_channels; i++) {
6169 if ((priv->status & STATUS_ASSOCIATED) &&
6170 geo->bg[i].channel == priv->channel)
6171 continue;
6172 channel_index++;
6173 scan->channels_list[channel_index] =
6174 geo->bg[i].channel;
6175 ipw_set_scan_type(scan, channel_index,
6176 geo->bg[i].
6177 flags &
6178 IEEE80211_CH_PASSIVE_ONLY ?
6179 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6180 : scan_type);
6181 }
6182 }
6183
6184 if (start != channel_index) {
6185 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6186 (channel_index - start);
6187 }
6188 }
6189 }
6190
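/*
 * Build and send an extended scan request.  Broadcast dwell times are
 * shortened when speed scan is enabled; in monitor mode the request covers
 * a single channel with a long passive dwell so the card stays parked on
 * the configured channel.  When roaming, or on every other scan while a
 * static ESSID is configured, the SSID is sent to the firmware first so
 * the scan becomes a directed one.
 */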
6191 static int ipw_request_scan(struct ipw_priv *priv)
6192 {
6193 struct ipw_scan_request_ext scan;
6194 int err = 0, scan_type;
6195
6196 if (!(priv->status & STATUS_INIT) ||
6197 (priv->status & STATUS_EXIT_PENDING))
6198 return 0;
6199
6200 mutex_lock(&priv->mutex);
6201
6202 if (priv->status & STATUS_SCANNING) {
6203 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6204 priv->status |= STATUS_SCAN_PENDING;
6205 goto done;
6206 }
6207
6208 if (!(priv->status & STATUS_SCAN_FORCED) &&
6209 priv->status & STATUS_SCAN_ABORTING) {
6210 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6211 priv->status |= STATUS_SCAN_PENDING;
6212 goto done;
6213 }
6214
6215 if (priv->status & STATUS_RF_KILL_MASK) {
6216 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6217 priv->status |= STATUS_SCAN_PENDING;
6218 goto done;
6219 }
6220
6221 memset(&scan, 0, sizeof(scan));
6222
6223 if (priv->config & CFG_SPEED_SCAN)
6224 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6225 cpu_to_le16(30);
6226 else
6227 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6228 cpu_to_le16(20);
6229
6230 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6231 cpu_to_le16(20);
6232 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6233
6234 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6235
6236 #ifdef CONFIG_IPW2200_MONITOR
6237 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6238 u8 channel;
6239 u8 band = 0;
6240
6241 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6242 case IEEE80211_52GHZ_BAND:
6243 band = (u8) (IPW_A_MODE << 6) | 1;
6244 channel = priv->channel;
6245 break;
6246
6247 case IEEE80211_24GHZ_BAND:
6248 band = (u8) (IPW_B_MODE << 6) | 1;
6249 channel = priv->channel;
6250 break;
6251
6252 default:
6253 band = (u8) (IPW_B_MODE << 6) | 1;
6254 channel = 9;
6255 break;
6256 }
6257
6258 scan.channels_list[0] = band;
6259 scan.channels_list[1] = channel;
6260 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6261
6262 /* NOTE: The card will sit on this channel for this time
6263 * period. Scan aborts are timing sensitive and frequently
6264 * result in firmware restarts. As such, it is best to
6265 * set a small dwell_time here and just keep re-issuing
6266 * scans. Otherwise fast channel hopping will not actually
6267 * hop channels.
6268 *
6269 * TODO: Move SPEED SCAN support to all modes and bands */
6270 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6271 cpu_to_le16(2000);
6272 } else {
6273 #endif /* CONFIG_IPW2200_MONITOR */
6274 /* If we are roaming, then make this a directed scan for the
6275 * current network. Otherwise, ensure that every other scan
6276 * is a fast channel hop scan */
6277 if ((priv->status & STATUS_ROAMING)
6278 || (!(priv->status & STATUS_ASSOCIATED)
6279 && (priv->config & CFG_STATIC_ESSID)
6280 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6281 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6282 if (err) {
6283 IPW_DEBUG_HC("Attempt to send SSID command "
6284 "failed.\n");
6285 goto done;
6286 }
6287
6288 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6289 } else
6290 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6291
6292 ipw_add_scan_channels(priv, &scan, scan_type);
6293 #ifdef CONFIG_IPW2200_MONITOR
6294 }
6295 #endif
6296
6297 err = ipw_send_scan_request_ext(priv, &scan);
6298 if (err) {
6299 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6300 goto done;
6301 }
6302
6303 priv->status |= STATUS_SCANNING;
6304 priv->status &= ~STATUS_SCAN_PENDING;
6305 queue_delayed_work(priv->workqueue, &priv->scan_check,
6306 IPW_SCAN_CHECK_WATCHDOG);
6307 done:
6308 mutex_unlock(&priv->mutex);
6309 return err;
6310 }
6311
6312 static void ipw_bg_abort_scan(void *data)
6313 {
6314 struct ipw_priv *priv = data;
6315 mutex_lock(&priv->mutex);
6316 ipw_abort_scan(data);
6317 mutex_unlock(&priv->mutex);
6318 }
6319
6320 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6321 {
6322 /* This is called when wpa_supplicant loads and closes the driver
6323 * interface. */
6324 priv->ieee->wpa_enabled = value;
6325 return 0;
6326 }
6327
6328 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6329 {
6330 struct ieee80211_device *ieee = priv->ieee;
6331 struct ieee80211_security sec = {
6332 .flags = SEC_AUTH_MODE,
6333 };
6334 int ret = 0;
6335
6336 if (value & IW_AUTH_ALG_SHARED_KEY) {
6337 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6338 ieee->open_wep = 0;
6339 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6340 sec.auth_mode = WLAN_AUTH_OPEN;
6341 ieee->open_wep = 1;
6342 } else if (value & IW_AUTH_ALG_LEAP) {
6343 sec.auth_mode = WLAN_AUTH_LEAP;
6344 ieee->open_wep = 1;
6345 } else
6346 return -EINVAL;
6347
6348 if (ieee->set_security)
6349 ieee->set_security(ieee->dev, &sec);
6350 else
6351 ret = -EOPNOTSUPP;
6352
6353 return ret;
6354 }
6355
6356 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6357 int wpa_ie_len)
6358 {
6359 /* make sure WPA is enabled */
6360 ipw_wpa_enable(priv, 1);
6361 }
6362
6363 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6364 char *capabilities, int length)
6365 {
6366 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6367
6368 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6369 capabilities);
6370 }
6371
6372 /*
6373 * WE-18 support
6374 */
6375
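/*
 * The handlers below implement the WE-18 ioctls used by wpa_supplicant:
 * SIOCSIWGENIE stores the WPA/RSN IE that will be sent with the
 * association request, SIOCSIWAUTH/SIOCGIWAUTH set and report the
 * authentication parameters, SIOCSIWENCODEEXT programs keys (delegated to
 * ieee80211_wx_set_encodeext), and SIOCSIWMLME triggers disassociation.
 */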
6376 /* SIOCSIWGENIE */
6377 static int ipw_wx_set_genie(struct net_device *dev,
6378 struct iw_request_info *info,
6379 union iwreq_data *wrqu, char *extra)
6380 {
6381 struct ipw_priv *priv = ieee80211_priv(dev);
6382 struct ieee80211_device *ieee = priv->ieee;
6383 u8 *buf;
6384 int err = 0;
6385
6386 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6387 (wrqu->data.length && extra == NULL))
6388 return -EINVAL;
6389
6390 //mutex_lock(&priv->mutex);
6391
6392 //if (!ieee->wpa_enabled) {
6393 // err = -EOPNOTSUPP;
6394 // goto out;
6395 //}
6396
6397 if (wrqu->data.length) {
6398 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6399 if (buf == NULL) {
6400 err = -ENOMEM;
6401 goto out;
6402 }
6403
6404 memcpy(buf, extra, wrqu->data.length);
6405 kfree(ieee->wpa_ie);
6406 ieee->wpa_ie = buf;
6407 ieee->wpa_ie_len = wrqu->data.length;
6408 } else {
6409 kfree(ieee->wpa_ie);
6410 ieee->wpa_ie = NULL;
6411 ieee->wpa_ie_len = 0;
6412 }
6413
6414 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6415 out:
6416 //mutex_unlock(&priv->mutex);
6417 return err;
6418 }
6419
6420 /* SIOCGIWGENIE */
6421 static int ipw_wx_get_genie(struct net_device *dev,
6422 struct iw_request_info *info,
6423 union iwreq_data *wrqu, char *extra)
6424 {
6425 struct ipw_priv *priv = ieee80211_priv(dev);
6426 struct ieee80211_device *ieee = priv->ieee;
6427 int err = 0;
6428
6429 //mutex_lock(&priv->mutex);
6430
6431 //if (!ieee->wpa_enabled) {
6432 // err = -EOPNOTSUPP;
6433 // goto out;
6434 //}
6435
6436 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6437 wrqu->data.length = 0;
6438 goto out;
6439 }
6440
6441 if (wrqu->data.length < ieee->wpa_ie_len) {
6442 err = -E2BIG;
6443 goto out;
6444 }
6445
6446 wrqu->data.length = ieee->wpa_ie_len;
6447 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6448
6449 out:
6450 //mutex_unlock(&priv->mutex);
6451 return err;
6452 }
6453
6454 static int wext_cipher2level(int cipher)
6455 {
6456 switch (cipher) {
6457 case IW_AUTH_CIPHER_NONE:
6458 return SEC_LEVEL_0;
6459 case IW_AUTH_CIPHER_WEP40:
6460 case IW_AUTH_CIPHER_WEP104:
6461 return SEC_LEVEL_1;
6462 case IW_AUTH_CIPHER_TKIP:
6463 return SEC_LEVEL_2;
6464 case IW_AUTH_CIPHER_CCMP:
6465 return SEC_LEVEL_3;
6466 default:
6467 return -1;
6468 }
6469 }
6470
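/*
 * A minimal userspace sketch of how these handlers are driven (assumes a
 * WE-18 aware <linux/wireless.h>; the interface name "eth1" and the socket
 * are just examples, not something this driver mandates):
 *
 *   struct iwreq req;
 *   int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *   memset(&req, 0, sizeof(req));
 *   strncpy(req.ifr_name, "eth1", IFNAMSIZ);
 *   req.u.param.flags = IW_AUTH_80211_AUTH_ALG;
 *   req.u.param.value = IW_AUTH_ALG_OPEN_SYSTEM;
 *   ioctl(sock, SIOCSIWAUTH, &req);
 */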
6471 /* SIOCSIWAUTH */
6472 static int ipw_wx_set_auth(struct net_device *dev,
6473 struct iw_request_info *info,
6474 union iwreq_data *wrqu, char *extra)
6475 {
6476 struct ipw_priv *priv = ieee80211_priv(dev);
6477 struct ieee80211_device *ieee = priv->ieee;
6478 struct iw_param *param = &wrqu->param;
6479 struct ieee80211_crypt_data *crypt;
6480 unsigned long flags;
6481 int ret = 0;
6482
6483 switch (param->flags & IW_AUTH_INDEX) {
6484 case IW_AUTH_WPA_VERSION:
6485 break;
6486 case IW_AUTH_CIPHER_PAIRWISE:
6487 ipw_set_hw_decrypt_unicast(priv,
6488 wext_cipher2level(param->value));
6489 break;
6490 case IW_AUTH_CIPHER_GROUP:
6491 ipw_set_hw_decrypt_multicast(priv,
6492 wext_cipher2level(param->value));
6493 break;
6494 case IW_AUTH_KEY_MGMT:
6495 /*
6496 * ipw2200 does not use these parameters
6497 */
6498 break;
6499
6500 case IW_AUTH_TKIP_COUNTERMEASURES:
6501 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6502 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6503 break;
6504
6505 flags = crypt->ops->get_flags(crypt->priv);
6506
6507 if (param->value)
6508 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6509 else
6510 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6511
6512 crypt->ops->set_flags(flags, crypt->priv);
6513
6514 break;
6515
6516 case IW_AUTH_DROP_UNENCRYPTED:{
6517 /* HACK:
6518 *
6519 * wpa_supplicant calls set_wpa_enabled when the driver
6520 * is loaded and unloaded, regardless of whether WPA is being
6521 * used. No other calls are made that can be used to
6522 * determine whether encryption will be used
6523 * prior to association. If encryption is not being
6524 * used, drop_unencrypted is set to false, else true -- we
6525 * can use this to determine if the CAP_PRIVACY_ON bit should
6526 * be set.
6527 */
6528 struct ieee80211_security sec = {
6529 .flags = SEC_ENABLED,
6530 .enabled = param->value,
6531 };
6532 priv->ieee->drop_unencrypted = param->value;
6533 /* We only change SEC_LEVEL for open mode. Others
6534 * are set by ipw_wpa_set_encryption.
6535 */
6536 if (!param->value) {
6537 sec.flags |= SEC_LEVEL;
6538 sec.level = SEC_LEVEL_0;
6539 } else {
6540 sec.flags |= SEC_LEVEL;
6541 sec.level = SEC_LEVEL_1;
6542 }
6543 if (priv->ieee->set_security)
6544 priv->ieee->set_security(priv->ieee->dev, &sec);
6545 break;
6546 }
6547
6548 case IW_AUTH_80211_AUTH_ALG:
6549 ret = ipw_wpa_set_auth_algs(priv, param->value);
6550 break;
6551
6552 case IW_AUTH_WPA_ENABLED:
6553 ret = ipw_wpa_enable(priv, param->value);
6554 ipw_disassociate(priv);
6555 break;
6556
6557 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6558 ieee->ieee802_1x = param->value;
6559 break;
6560
6561 //case IW_AUTH_ROAMING_CONTROL:
6562 case IW_AUTH_PRIVACY_INVOKED:
6563 ieee->privacy_invoked = param->value;
6564 break;
6565
6566 default:
6567 return -EOPNOTSUPP;
6568 }
6569 return ret;
6570 }
6571
6572 /* SIOCGIWAUTH */
6573 static int ipw_wx_get_auth(struct net_device *dev,
6574 struct iw_request_info *info,
6575 union iwreq_data *wrqu, char *extra)
6576 {
6577 struct ipw_priv *priv = ieee80211_priv(dev);
6578 struct ieee80211_device *ieee = priv->ieee;
6579 struct ieee80211_crypt_data *crypt;
6580 struct iw_param *param = &wrqu->param;
6581 int ret = 0;
6582
6583 switch (param->flags & IW_AUTH_INDEX) {
6584 case IW_AUTH_WPA_VERSION:
6585 case IW_AUTH_CIPHER_PAIRWISE:
6586 case IW_AUTH_CIPHER_GROUP:
6587 case IW_AUTH_KEY_MGMT:
6588 /*
6589 * wpa_supplicant will control these internally
6590 */
6591 ret = -EOPNOTSUPP;
6592 break;
6593
6594 case IW_AUTH_TKIP_COUNTERMEASURES:
6595 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6596 if (!crypt || !crypt->ops->get_flags)
6597 break;
6598
6599 param->value = (crypt->ops->get_flags(crypt->priv) &
6600 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6601
6602 break;
6603
6604 case IW_AUTH_DROP_UNENCRYPTED:
6605 param->value = ieee->drop_unencrypted;
6606 break;
6607
6608 case IW_AUTH_80211_AUTH_ALG:
6609 param->value = ieee->sec.auth_mode;
6610 break;
6611
6612 case IW_AUTH_WPA_ENABLED:
6613 param->value = ieee->wpa_enabled;
6614 break;
6615
6616 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6617 param->value = ieee->ieee802_1x;
6618 break;
6619
6620 case IW_AUTH_ROAMING_CONTROL:
6621 case IW_AUTH_PRIVACY_INVOKED:
6622 param->value = ieee->privacy_invoked;
6623 break;
6624
6625 default:
6626 return -EOPNOTSUPP;
6627 }
6628 return 0;
6629 }
6630
6631 /* SIOCSIWENCODEEXT */
6632 static int ipw_wx_set_encodeext(struct net_device *dev,
6633 struct iw_request_info *info,
6634 union iwreq_data *wrqu, char *extra)
6635 {
6636 struct ipw_priv *priv = ieee80211_priv(dev);
6637 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6638
6639 if (hwcrypto) {
6640 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6641 /* IPW HW can't build TKIP MIC,
6642 host decryption still needed */
6643 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6644 priv->ieee->host_mc_decrypt = 1;
6645 else {
6646 priv->ieee->host_encrypt = 0;
6647 priv->ieee->host_encrypt_msdu = 1;
6648 priv->ieee->host_decrypt = 1;
6649 }
6650 } else {
6651 priv->ieee->host_encrypt = 0;
6652 priv->ieee->host_encrypt_msdu = 0;
6653 priv->ieee->host_decrypt = 0;
6654 priv->ieee->host_mc_decrypt = 0;
6655 }
6656 }
6657
6658 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6659 }
6660
6661 /* SIOCGIWENCODEEXT */
6662 static int ipw_wx_get_encodeext(struct net_device *dev,
6663 struct iw_request_info *info,
6664 union iwreq_data *wrqu, char *extra)
6665 {
6666 struct ipw_priv *priv = ieee80211_priv(dev);
6667 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6668 }
6669
6670 /* SIOCSIWMLME */
6671 static int ipw_wx_set_mlme(struct net_device *dev,
6672 struct iw_request_info *info,
6673 union iwreq_data *wrqu, char *extra)
6674 {
6675 struct ipw_priv *priv = ieee80211_priv(dev);
6676 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6677 u16 reason;
6678
6679 reason = cpu_to_le16(mlme->reason_code);
6680
6681 switch (mlme->cmd) {
6682 case IW_MLME_DEAUTH:
6683 // silently ignore
6684 break;
6685
6686 case IW_MLME_DISASSOC:
6687 ipw_disassociate(priv);
6688 break;
6689
6690 default:
6691 return -EOPNOTSUPP;
6692 }
6693 return 0;
6694 }
6695
6696 #ifdef CONFIG_IPW2200_QOS
6697
6698 /* QoS */
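/*
 * Overview of the QoS (WME) path: the beacon and probe response handlers
 * below update the per-network qos_data and schedule priv->qos_activate
 * whenever the advertised parameter set changes; ipw_qos_activate() then
 * sends IPW_CMD_QOS_PARAMETERS (and, at association time,
 * ipw_qos_set_info_element() sends IPW_CMD_WME_INFO) to the firmware.  On
 * transmit, ipw_get_tx_queue_number() and ipw_qos_set_tx_queue_command()
 * map the 802.1d priority onto the firmware TX queues.
 */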
6699 /*
6700 * get the modulation type of the current network or
6701 * the card's current mode
6702 */
6703 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6704 {
6705 u8 mode = 0;
6706
6707 if (priv->status & STATUS_ASSOCIATED) {
6708 unsigned long flags;
6709
6710 spin_lock_irqsave(&priv->ieee->lock, flags);
6711 mode = priv->assoc_network->mode;
6712 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6713 } else {
6714 mode = priv->ieee->mode;
6715 }
6716 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6717 return mode;
6718 }
6719
6720 /*
6721 * Handle management frame beacon and probe response
6722 */
6723 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6724 int active_network,
6725 struct ieee80211_network *network)
6726 {
6727 u32 size = sizeof(struct ieee80211_qos_parameters);
6728
6729 if (network->capability & WLAN_CAPABILITY_IBSS)
6730 network->qos_data.active = network->qos_data.supported;
6731
6732 if (network->flags & NETWORK_HAS_QOS_MASK) {
6733 if (active_network &&
6734 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6735 network->qos_data.active = network->qos_data.supported;
6736
6737 if ((network->qos_data.active == 1) && (active_network == 1) &&
6738 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6739 (network->qos_data.old_param_count !=
6740 network->qos_data.param_count)) {
6741 network->qos_data.old_param_count =
6742 network->qos_data.param_count;
6743 schedule_work(&priv->qos_activate);
6744 IPW_DEBUG_QOS("QoS parameters change call "
6745 "qos_activate\n");
6746 }
6747 } else {
6748 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6749 memcpy(&network->qos_data.parameters,
6750 &def_parameters_CCK, size);
6751 else
6752 memcpy(&network->qos_data.parameters,
6753 &def_parameters_OFDM, size);
6754
6755 if ((network->qos_data.active == 1) && (active_network == 1)) {
6756 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6757 schedule_work(&priv->qos_activate);
6758 }
6759
6760 network->qos_data.active = 0;
6761 network->qos_data.supported = 0;
6762 }
6763 if ((priv->status & STATUS_ASSOCIATED) &&
6764 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6765 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6766 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6767 !(network->flags & NETWORK_EMPTY_ESSID))
6768 if ((network->ssid_len ==
6769 priv->assoc_network->ssid_len) &&
6770 !memcmp(network->ssid,
6771 priv->assoc_network->ssid,
6772 network->ssid_len)) {
6773 queue_work(priv->workqueue,
6774 &priv->merge_networks);
6775 }
6776 }
6777
6778 return 0;
6779 }
6780
6781 /*
6782 * This function sets up the firmware to support QoS. It sends
6783 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6784 */
6785 static int ipw_qos_activate(struct ipw_priv *priv,
6786 struct ieee80211_qos_data *qos_network_data)
6787 {
6788 int err;
6789 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6790 struct ieee80211_qos_parameters *active_one = NULL;
6791 u32 size = sizeof(struct ieee80211_qos_parameters);
6792 u32 burst_duration;
6793 int i;
6794 u8 type;
6795
6796 type = ipw_qos_current_mode(priv);
6797
6798 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6799 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6800 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6801 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6802
6803 if (qos_network_data == NULL) {
6804 if (type == IEEE_B) {
6805 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6806 active_one = &def_parameters_CCK;
6807 } else
6808 active_one = &def_parameters_OFDM;
6809
6810 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6811 burst_duration = ipw_qos_get_burst_duration(priv);
6812 for (i = 0; i < QOS_QUEUE_NUM; i++)
6813 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6814 (u16) burst_duration;
6815 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6816 if (type == IEEE_B) {
6817 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
6818 type);
6819 if (priv->qos_data.qos_enable == 0)
6820 active_one = &def_parameters_CCK;
6821 else
6822 active_one = priv->qos_data.def_qos_parm_CCK;
6823 } else {
6824 if (priv->qos_data.qos_enable == 0)
6825 active_one = &def_parameters_OFDM;
6826 else
6827 active_one = priv->qos_data.def_qos_parm_OFDM;
6828 }
6829 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6830 } else {
6831 unsigned long flags;
6832 int active;
6833
6834 spin_lock_irqsave(&priv->ieee->lock, flags);
6835 active_one = &(qos_network_data->parameters);
6836 qos_network_data->old_param_count =
6837 qos_network_data->param_count;
6838 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6839 active = qos_network_data->supported;
6840 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6841
6842 if (active == 0) {
6843 burst_duration = ipw_qos_get_burst_duration(priv);
6844 for (i = 0; i < QOS_QUEUE_NUM; i++)
6845 qos_parameters[QOS_PARAM_SET_ACTIVE].
6846 tx_op_limit[i] = (u16) burst_duration;
6847 }
6848 }
6849
6850 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6851 err = ipw_send_qos_params_command(priv,
6852 (struct ieee80211_qos_parameters *)
6853 &(qos_parameters[0]));
6854 if (err)
6855 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6856
6857 return err;
6858 }
6859
6860 /*
6861 * send IPW_CMD_WME_INFO to the firmware
6862 */
6863 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6864 {
6865 int ret = 0;
6866 struct ieee80211_qos_information_element qos_info;
6867
6868 if (priv == NULL)
6869 return -1;
6870
6871 qos_info.elementID = QOS_ELEMENT_ID;
6872 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6873
6874 qos_info.version = QOS_VERSION_1;
6875 qos_info.ac_info = 0;
6876
6877 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6878 qos_info.qui_type = QOS_OUI_TYPE;
6879 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6880
6881 ret = ipw_send_qos_info_command(priv, &qos_info);
6882 if (ret != 0) {
6883 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6884 }
6885 return ret;
6886 }
6887
6888 /*
6889 * Set the QoS parameter with the association request structure
6890 */
6891 static int ipw_qos_association(struct ipw_priv *priv,
6892 struct ieee80211_network *network)
6893 {
6894 int err = 0;
6895 struct ieee80211_qos_data *qos_data = NULL;
6896 struct ieee80211_qos_data ibss_data = {
6897 .supported = 1,
6898 .active = 1,
6899 };
6900
6901 switch (priv->ieee->iw_mode) {
6902 case IW_MODE_ADHOC:
6903 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6904
6905 qos_data = &ibss_data;
6906 break;
6907
6908 case IW_MODE_INFRA:
6909 qos_data = &network->qos_data;
6910 break;
6911
6912 default:
6913 BUG();
6914 break;
6915 }
6916
6917 err = ipw_qos_activate(priv, qos_data);
6918 if (err) {
6919 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6920 return err;
6921 }
6922
6923 if (priv->qos_data.qos_enable && qos_data->supported) {
6924 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6925 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6926 return ipw_qos_set_info_element(priv);
6927 }
6928
6929 return 0;
6930 }
6931
6932 /*
6933 * Handle beacon responses. If the QoS setting advertised by the
6934 * network differs from the setting we associated with, adjust the
6935 * QoS setting accordingly.
6936 */
6937 static int ipw_qos_association_resp(struct ipw_priv *priv,
6938 struct ieee80211_network *network)
6939 {
6940 int ret = 0;
6941 unsigned long flags;
6942 u32 size = sizeof(struct ieee80211_qos_parameters);
6943 int set_qos_param = 0;
6944
6945 if ((priv == NULL) || (network == NULL) ||
6946 (priv->assoc_network == NULL))
6947 return ret;
6948
6949 if (!(priv->status & STATUS_ASSOCIATED))
6950 return ret;
6951
6952 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6953 return ret;
6954
6955 spin_lock_irqsave(&priv->ieee->lock, flags);
6956 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6957 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6958 sizeof(struct ieee80211_qos_data));
6959 priv->assoc_network->qos_data.active = 1;
6960 if ((network->qos_data.old_param_count !=
6961 network->qos_data.param_count)) {
6962 set_qos_param = 1;
6963 network->qos_data.old_param_count =
6964 network->qos_data.param_count;
6965 }
6966
6967 } else {
6968 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6969 memcpy(&priv->assoc_network->qos_data.parameters,
6970 &def_parameters_CCK, size);
6971 else
6972 memcpy(&priv->assoc_network->qos_data.parameters,
6973 &def_parameters_OFDM, size);
6974 priv->assoc_network->qos_data.active = 0;
6975 priv->assoc_network->qos_data.supported = 0;
6976 set_qos_param = 1;
6977 }
6978
6979 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6980
6981 if (set_qos_param == 1)
6982 schedule_work(&priv->qos_activate);
6983
6984 return ret;
6985 }
6986
6987 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6988 {
6989 u32 ret = 0;
6990
6991 if ((priv == NULL))
6992 return 0;
6993
6994 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6995 ret = priv->qos_data.burst_duration_CCK;
6996 else
6997 ret = priv->qos_data.burst_duration_OFDM;
6998
6999 return ret;
7000 }
7001
7002 /*
7003 * Initialize the global QoS settings
7004 */
7005 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7006 int burst_enable, u32 burst_duration_CCK,
7007 u32 burst_duration_OFDM)
7008 {
7009 priv->qos_data.qos_enable = enable;
7010
7011 if (priv->qos_data.qos_enable) {
7012 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7013 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7014 IPW_DEBUG_QOS("QoS is enabled\n");
7015 } else {
7016 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7017 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7018 IPW_DEBUG_QOS("QoS is not enabled\n");
7019 }
7020
7021 priv->qos_data.burst_enable = burst_enable;
7022
7023 if (burst_enable) {
7024 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7025 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7026 } else {
7027 priv->qos_data.burst_duration_CCK = 0;
7028 priv->qos_data.burst_duration_OFDM = 0;
7029 }
7030 }
7031
7032 /*
7033 * map the packet priority to the right TX Queue
7034 */
7035 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7036 {
7037 if (priority > 7 || !priv->qos_data.qos_enable)
7038 priority = 0;
7039
7040 return from_priority_to_tx_queue[priority] - 1;
7041 }
7042
7043 static int ipw_is_qos_active(struct net_device *dev,
7044 struct sk_buff *skb)
7045 {
7046 struct ipw_priv *priv = ieee80211_priv(dev);
7047 struct ieee80211_qos_data *qos_data = NULL;
7048 int active, supported;
7049 u8 *daddr = skb->data + ETH_ALEN;
7050 int unicast = !is_multicast_ether_addr(daddr);
7051
7052 if (!(priv->status & STATUS_ASSOCIATED))
7053 return 0;
7054
7055 qos_data = &priv->assoc_network->qos_data;
7056
7057 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7058 if (unicast == 0)
7059 qos_data->active = 0;
7060 else
7061 qos_data->active = qos_data->supported;
7062 }
7063 active = qos_data->active;
7064 supported = qos_data->supported;
7065 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7066 "unicast %d\n",
7067 priv->qos_data.qos_enable, active, supported, unicast);
7068 if (active && priv->qos_data.qos_enable)
7069 return 1;
7070
7071 return 0;
7072
7073 }
7074 /*
7075 * add QoS parameter to the TX command
7076 */
7077 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7078 u16 priority,
7079 struct tfd_data *tfd)
7080 {
7081 int tx_queue_id = 0;
7082
7083
7084 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7085 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7086
7087 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7088 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7089 tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
7090 }
7091 return 0;
7092 }
7093
7094 /*
7095 * background support to run QoS activate functionality
7096 */
7097 static void ipw_bg_qos_activate(void *data)
7098 {
7099 struct ipw_priv *priv = data;
7100
7101 if (priv == NULL)
7102 return;
7103
7104 mutex_lock(&priv->mutex);
7105
7106 if (priv->status & STATUS_ASSOCIATED)
7107 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7108
7109 mutex_unlock(&priv->mutex);
7110 }
7111
7112 static int ipw_handle_probe_response(struct net_device *dev,
7113 struct ieee80211_probe_response *resp,
7114 struct ieee80211_network *network)
7115 {
7116 struct ipw_priv *priv = ieee80211_priv(dev);
7117 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7118 (network == priv->assoc_network));
7119
7120 ipw_qos_handle_probe_response(priv, active_network, network);
7121
7122 return 0;
7123 }
7124
7125 static int ipw_handle_beacon(struct net_device *dev,
7126 struct ieee80211_beacon *resp,
7127 struct ieee80211_network *network)
7128 {
7129 struct ipw_priv *priv = ieee80211_priv(dev);
7130 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7131 (network == priv->assoc_network));
7132
7133 ipw_qos_handle_probe_response(priv, active_network, network);
7134
7135 return 0;
7136 }
7137
7138 static int ipw_handle_assoc_response(struct net_device *dev,
7139 struct ieee80211_assoc_response *resp,
7140 struct ieee80211_network *network)
7141 {
7142 struct ipw_priv *priv = ieee80211_priv(dev);
7143 ipw_qos_association_resp(priv, network);
7144 return 0;
7145 }
7146
7147 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7148 *qos_param)
7149 {
7150 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7151 sizeof(*qos_param) * 3, qos_param);
7152 }
7153
7154 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7155 *qos_param)
7156 {
7157 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7158 qos_param);
7159 }
7160
7161 #endif /* CONFIG_IPW2200_QOS */
7162
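/*
 * Run the full association sequence for the chosen network: program a
 * fixed rate if configured, build priv->assoc_request (auth type, WPA/RSN
 * capabilities, IEEE mode, preamble, beacon/listen/ATIM parameters, TSF),
 * then send the SSID, supported rates, system configuration and
 * sensitivity commands before finally issuing the associate command.
 */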
7163 static int ipw_associate_network(struct ipw_priv *priv,
7164 struct ieee80211_network *network,
7165 struct ipw_supported_rates *rates, int roaming)
7166 {
7167 int err;
7168
7169 if (priv->config & CFG_FIXED_RATE)
7170 ipw_set_fixed_rate(priv, network->mode);
7171
7172 if (!(priv->config & CFG_STATIC_ESSID)) {
7173 priv->essid_len = min(network->ssid_len,
7174 (u8) IW_ESSID_MAX_SIZE);
7175 memcpy(priv->essid, network->ssid, priv->essid_len);
7176 }
7177
7178 network->last_associate = jiffies;
7179
7180 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7181 priv->assoc_request.channel = network->channel;
7182 priv->assoc_request.auth_key = 0;
7183
7184 if ((priv->capability & CAP_PRIVACY_ON) &&
7185 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7186 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7187 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7188
7189 if (priv->ieee->sec.level == SEC_LEVEL_1)
7190 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7191
7192 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7193 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7194 priv->assoc_request.auth_type = AUTH_LEAP;
7195 else
7196 priv->assoc_request.auth_type = AUTH_OPEN;
7197
7198 if (priv->ieee->wpa_ie_len) {
7199 priv->assoc_request.policy_support = 0x02; /* RSN active */
7200 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7201 priv->ieee->wpa_ie_len);
7202 }
7203
7204 /*
7205 * It is valid for our ieee device to support multiple modes, but
7206 * when it comes to associating to a given network we have to choose
7207 * just one mode.
7208 */
7209 if (network->mode & priv->ieee->mode & IEEE_A)
7210 priv->assoc_request.ieee_mode = IPW_A_MODE;
7211 else if (network->mode & priv->ieee->mode & IEEE_G)
7212 priv->assoc_request.ieee_mode = IPW_G_MODE;
7213 else if (network->mode & priv->ieee->mode & IEEE_B)
7214 priv->assoc_request.ieee_mode = IPW_B_MODE;
7215
7216 priv->assoc_request.capability = network->capability;
7217 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7218 && !(priv->config & CFG_PREAMBLE_LONG)) {
7219 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7220 } else {
7221 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7222
7223 /* Clear the short preamble if we won't be supporting it */
7224 priv->assoc_request.capability &=
7225 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7226 }
7227
7228 /* Clear capability bits that aren't used in Ad Hoc */
7229 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7230 priv->assoc_request.capability &=
7231 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7232
7233 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7234 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7235 roaming ? "Rea" : "A",
7236 escape_essid(priv->essid, priv->essid_len),
7237 network->channel,
7238 ipw_modes[priv->assoc_request.ieee_mode],
7239 rates->num_rates,
7240 (priv->assoc_request.preamble_length ==
7241 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7242 network->capability &
7243 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7244 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7245 priv->capability & CAP_PRIVACY_ON ?
7246 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7247 "(open)") : "",
7248 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7249 priv->capability & CAP_PRIVACY_ON ?
7250 '1' + priv->ieee->sec.active_key : '.',
7251 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7252
7253 priv->assoc_request.beacon_interval = network->beacon_interval;
7254 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7255 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7256 priv->assoc_request.assoc_type = HC_IBSS_START;
7257 priv->assoc_request.assoc_tsf_msw = 0;
7258 priv->assoc_request.assoc_tsf_lsw = 0;
7259 } else {
7260 if (unlikely(roaming))
7261 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7262 else
7263 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7264 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7265 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7266 }
7267
7268 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7269
7270 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7271 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7272 priv->assoc_request.atim_window = network->atim_window;
7273 } else {
7274 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7275 priv->assoc_request.atim_window = 0;
7276 }
7277
7278 priv->assoc_request.listen_interval = network->listen_interval;
7279
7280 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7281 if (err) {
7282 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7283 return err;
7284 }
7285
7286 rates->ieee_mode = priv->assoc_request.ieee_mode;
7287 rates->purpose = IPW_RATE_CONNECT;
7288 ipw_send_supported_rates(priv, rates);
7289
7290 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7291 priv->sys_config.dot11g_auto_detection = 1;
7292 else
7293 priv->sys_config.dot11g_auto_detection = 0;
7294
7295 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7296 priv->sys_config.answer_broadcast_ssid_probe = 1;
7297 else
7298 priv->sys_config.answer_broadcast_ssid_probe = 0;
7299
7300 err = ipw_send_system_config(priv);
7301 if (err) {
7302 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7303 return err;
7304 }
7305
7306 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7307 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7308 if (err) {
7309 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7310 return err;
7311 }
7312
7313 /*
7314 * If preemption is enabled, it is possible for the association
7315 * to complete before we return from ipw_send_associate. Therefore
7316 * we have to be sure to update our private data first.
7317 */
7318 priv->channel = network->channel;
7319 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7320 priv->status |= STATUS_ASSOCIATING;
7321 priv->status &= ~STATUS_SECURITY_UPDATED;
7322
7323 priv->assoc_network = network;
7324
7325 #ifdef CONFIG_IPW2200_QOS
7326 ipw_qos_association(priv, network);
7327 #endif
7328
7329 err = ipw_send_associate(priv, &priv->assoc_request);
7330 if (err) {
7331 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7332 return err;
7333 }
7334
7335 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7336 escape_essid(priv->essid, priv->essid_len),
7337 MAC_ARG(priv->bssid));
7338
7339 return 0;
7340 }
7341
7342 static void ipw_roam(void *data)
7343 {
7344 struct ipw_priv *priv = data;
7345 struct ieee80211_network *network = NULL;
7346 struct ipw_network_match match = {
7347 .network = priv->assoc_network
7348 };
7349
7350 /* The roaming process is as follows:
7351 *
7352 * 1. Missed beacon threshold triggers the roaming process by
7353 * setting the status ROAM bit and requesting a scan.
7354 * 2. When the scan completes, it schedules the ROAM work
7355 * 3. The ROAM work looks at all of the known networks for one that
7356 * is a better network than the one currently associated. If none
7357 * found, the ROAM process is over (ROAM bit cleared)
7358 * 4. If a better network is found, a disassociation request is
7359 * sent.
7360 * 5. When the disassociation completes, the roam work is again
7361 * scheduled. The second time through, the driver is no longer
7362 * associated, and the newly selected network is sent an
7363 * association request.
7364 * 6. At this point, the roaming process is complete and the ROAM
7365 * status bit is cleared.
7366 */
7367
7368 /* If we are no longer associated, and the roaming bit is no longer
7369 * set, then we are not actively roaming, so just return */
7370 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7371 return;
7372
7373 if (priv->status & STATUS_ASSOCIATED) {
7374 /* First pass through ROAM process -- look for a better
7375 * network */
7376 unsigned long flags;
7377 u8 rssi = priv->assoc_network->stats.rssi;
7378 priv->assoc_network->stats.rssi = -128;
7379 spin_lock_irqsave(&priv->ieee->lock, flags);
7380 list_for_each_entry(network, &priv->ieee->network_list, list) {
7381 if (network != priv->assoc_network)
7382 ipw_best_network(priv, &match, network, 1);
7383 }
7384 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7385 priv->assoc_network->stats.rssi = rssi;
7386
7387 if (match.network == priv->assoc_network) {
7388 IPW_DEBUG_ASSOC("No better APs in this network to "
7389 "roam to.\n");
7390 priv->status &= ~STATUS_ROAMING;
7391 ipw_debug_config(priv);
7392 return;
7393 }
7394
7395 ipw_send_disassociate(priv, 1);
7396 priv->assoc_network = match.network;
7397
7398 return;
7399 }
7400
7401 /* Second pass through ROAM process -- request association */
7402 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7403 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7404 priv->status &= ~STATUS_ROAMING;
7405 }
7406
7407 static void ipw_bg_roam(void *data)
7408 {
7409 struct ipw_priv *priv = data;
7410 mutex_lock(&priv->mutex);
7411 ipw_roam(data);
7412 mutex_unlock(&priv->mutex);
7413 }
7414
7415 static int ipw_associate(void *data)
7416 {
7417 struct ipw_priv *priv = data;
7418
7419 struct ieee80211_network *network = NULL;
7420 struct ipw_network_match match = {
7421 .network = NULL
7422 };
7423 struct ipw_supported_rates *rates;
7424 struct list_head *element;
7425 unsigned long flags;
7426
7427 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7428 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7429 return 0;
7430 }
7431
7432 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7433 IPW_DEBUG_ASSOC("Not attempting association (already in "
7434 "progress)\n");
7435 return 0;
7436 }
7437
7438 if (priv->status & STATUS_DISASSOCIATING) {
7439 IPW_DEBUG_ASSOC("Not attempting association (in "
7440 "disassociating)\n ");
7441 queue_work(priv->workqueue, &priv->associate);
7442 return 0;
7443 }
7444
7445 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7446 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7447 "initialized)\n");
7448 return 0;
7449 }
7450
7451 if (!(priv->config & CFG_ASSOCIATE) &&
7452 !(priv->config & (CFG_STATIC_ESSID |
7453 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7454 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7455 return 0;
7456 }
7457
7458 /* Protect our use of the network_list */
7459 spin_lock_irqsave(&priv->ieee->lock, flags);
7460 list_for_each_entry(network, &priv->ieee->network_list, list)
7461 ipw_best_network(priv, &match, network, 0);
7462
7463 network = match.network;
7464 rates = &match.rates;
7465
7466 if (network == NULL &&
7467 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7468 priv->config & CFG_ADHOC_CREATE &&
7469 priv->config & CFG_STATIC_ESSID &&
7470 priv->config & CFG_STATIC_CHANNEL &&
7471 !list_empty(&priv->ieee->network_free_list)) {
7472 element = priv->ieee->network_free_list.next;
7473 network = list_entry(element, struct ieee80211_network, list);
7474 ipw_adhoc_create(priv, network);
7475 rates = &priv->rates;
7476 list_del(element);
7477 list_add_tail(&network->list, &priv->ieee->network_list);
7478 }
7479 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7480
7481 /* If we reached the end of the list, then we don't have any valid
7482 * matching APs */
7483 if (!network) {
7484 ipw_debug_config(priv);
7485
7486 if (!(priv->status & STATUS_SCANNING)) {
7487 if (!(priv->config & CFG_SPEED_SCAN))
7488 queue_delayed_work(priv->workqueue,
7489 &priv->request_scan,
7490 SCAN_INTERVAL);
7491 else
7492 queue_work(priv->workqueue,
7493 &priv->request_scan);
7494 }
7495
7496 return 0;
7497 }
7498
7499 ipw_associate_network(priv, network, rates, 0);
7500
7501 return 1;
7502 }
7503
7504 static void ipw_bg_associate(void *data)
7505 {
7506 struct ipw_priv *priv = data;
7507 mutex_lock(&priv->mutex);
7508 ipw_associate(data);
7509 mutex_unlock(&priv->mutex);
7510 }
7511
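/*
 * After hardware decryption the frame still carries its security header
 * and trailer, so strip them before handing the frame up the stack: for
 * CCMP remove the 8-byte CCMP header and 8-byte MIC, for WEP remove the
 * 4-byte IV and 4-byte ICV, and clear the protected bit in the frame
 * control field in both cases.
 */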
7512 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7513 struct sk_buff *skb)
7514 {
7515 struct ieee80211_hdr *hdr;
7516 u16 fc;
7517
7518 hdr = (struct ieee80211_hdr *)skb->data;
7519 fc = le16_to_cpu(hdr->frame_ctl);
7520 if (!(fc & IEEE80211_FCTL_PROTECTED))
7521 return;
7522
7523 fc &= ~IEEE80211_FCTL_PROTECTED;
7524 hdr->frame_ctl = cpu_to_le16(fc);
7525 switch (priv->ieee->sec.level) {
7526 case SEC_LEVEL_3:
7527 /* Remove CCMP HDR */
7528 memmove(skb->data + IEEE80211_3ADDR_LEN,
7529 skb->data + IEEE80211_3ADDR_LEN + 8,
7530 skb->len - IEEE80211_3ADDR_LEN - 8);
7531 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7532 break;
7533 case SEC_LEVEL_2:
7534 break;
7535 case SEC_LEVEL_1:
7536 /* Remove IV */
7537 memmove(skb->data + IEEE80211_3ADDR_LEN,
7538 skb->data + IEEE80211_3ADDR_LEN + 4,
7539 skb->len - IEEE80211_3ADDR_LEN - 4);
7540 skb_trim(skb, skb->len - 8); /* IV + ICV */
7541 break;
7542 case SEC_LEVEL_0:
7543 break;
7544 default:
7545 printk(KERN_ERR "Unknown security level %d\n",
7546 priv->ieee->sec.level);
7547 break;
7548 }
7549 }
7550
7551 static void ipw_handle_data_packet(struct ipw_priv *priv,
7552 struct ipw_rx_mem_buffer *rxb,
7553 struct ieee80211_rx_stats *stats)
7554 {
7555 struct ieee80211_hdr_4addr *hdr;
7556 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7557
7558 /* We received data from the HW, so stop the watchdog */
7559 priv->net_dev->trans_start = jiffies;
7560
7561 /* We only process data packets if the
7562 * interface is open */
7563 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7564 skb_tailroom(rxb->skb))) {
7565 priv->ieee->stats.rx_errors++;
7566 priv->wstats.discard.misc++;
7567 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7568 return;
7569 } else if (unlikely(!netif_running(priv->net_dev))) {
7570 priv->ieee->stats.rx_dropped++;
7571 priv->wstats.discard.misc++;
7572 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7573 return;
7574 }
7575
7576 /* Advance skb->data to the start of the actual payload */
7577 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7578
7579 /* Set the size of the skb to the size of the frame */
7580 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7581
7582 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7583
7584 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7585 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7586 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7587 (is_multicast_ether_addr(hdr->addr1) ?
7588 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7589 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7590
7591 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7592 priv->ieee->stats.rx_errors++;
7593 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7594 rxb->skb = NULL;
7595 __ipw_led_activity_on(priv);
7596 }
7597 }
7598
7599 #ifdef CONFIG_IPW2200_RADIOTAP
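/*
 * Monitor-mode receive path: prepend an ipw_rt_hdr radiotap header to the
 * raw 802.11 frame, filling in channel, rate, signal strength, antenna and
 * preamble information from the ipw Rx frame descriptor, then hand the
 * result to ieee80211_rx() for delivery on the monitor interface.
 */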
7600 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7601 struct ipw_rx_mem_buffer *rxb,
7602 struct ieee80211_rx_stats *stats)
7603 {
7604 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7605 struct ipw_rx_frame *frame = &pkt->u.frame;
7606
7607 /* initial pull of some data */
7608 u16 received_channel = frame->received_channel;
7609 u8 antennaAndPhy = frame->antennaAndPhy;
7610 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7611 u16 pktrate = frame->rate;
7612
7613 /* Magic struct that slots into the radiotap header -- no reason
7614 * to build this manually element by element, we can write it much
7615 * more efficiently than we can parse it. ORDER MATTERS HERE */
7616 struct ipw_rt_hdr *ipw_rt;
7617
7618 short len = le16_to_cpu(pkt->u.frame.length);
7619
7620 /* We received data from the HW, so stop the watchdog */
7621 priv->net_dev->trans_start = jiffies;
7622
7623 /* We only process data packets if the
7624 * interface is open */
7625 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7626 skb_tailroom(rxb->skb))) {
7627 priv->ieee->stats.rx_errors++;
7628 priv->wstats.discard.misc++;
7629 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7630 return;
7631 } else if (unlikely(!netif_running(priv->net_dev))) {
7632 priv->ieee->stats.rx_dropped++;
7633 priv->wstats.discard.misc++;
7634 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7635 return;
7636 }
7637
7638 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7639 * that now */
7640 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7641 /* FIXME: Should alloc bigger skb instead */
7642 priv->ieee->stats.rx_dropped++;
7643 priv->wstats.discard.misc++;
7644 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7645 return;
7646 }
7647
7648 /* copy the frame itself */
7649 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7650 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7651
7652 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7653 * part of our real header, saves a little time.
7654 *
7655 * No longer necessary since we fill in all our data. Purge before merging
7656 * patch officially.
7657 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7658 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7659 */
7660
7661 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7662
7663 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7664 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7665 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7666
7667 /* Big bitfield of all the fields we provide in radiotap */
7668 ipw_rt->rt_hdr.it_present =
7669 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7670 (1 << IEEE80211_RADIOTAP_TSFT) |
7671 (1 << IEEE80211_RADIOTAP_RATE) |
7672 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7673 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7674 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7675 (1 << IEEE80211_RADIOTAP_ANTENNA));
7676
7677 /* Zero the flags, we'll add to them as we go */
7678 ipw_rt->rt_flags = 0;
7679
7680 /* Convert signal to DBM */
7681 ipw_rt->rt_dbmsignal = antsignal;
7682
7683 /* Convert the channel data and set the flags */
7684 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7685 if (received_channel > 14) { /* 802.11a */
7686 ipw_rt->rt_chbitmask =
7687 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7688 } else if (antennaAndPhy & 32) { /* 802.11b */
7689 ipw_rt->rt_chbitmask =
7690 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7691 } else { /* 802.11g */
7692 ipw_rt->rt_chbitmask =
7693 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7694 }
7695
7696 /* set the rate in multiples of 500k/s */
7697 switch (pktrate) {
7698 case IPW_TX_RATE_1MB:
7699 ipw_rt->rt_rate = 2;
7700 break;
7701 case IPW_TX_RATE_2MB:
7702 ipw_rt->rt_rate = 4;
7703 break;
7704 case IPW_TX_RATE_5MB:
7705 ipw_rt->rt_rate = 10;
7706 break;
7707 case IPW_TX_RATE_6MB:
7708 ipw_rt->rt_rate = 12;
7709 break;
7710 case IPW_TX_RATE_9MB:
7711 ipw_rt->rt_rate = 18;
7712 break;
7713 case IPW_TX_RATE_11MB:
7714 ipw_rt->rt_rate = 22;
7715 break;
7716 case IPW_TX_RATE_12MB:
7717 ipw_rt->rt_rate = 24;
7718 break;
7719 case IPW_TX_RATE_18MB:
7720 ipw_rt->rt_rate = 36;
7721 break;
7722 case IPW_TX_RATE_24MB:
7723 ipw_rt->rt_rate = 48;
7724 break;
7725 case IPW_TX_RATE_36MB:
7726 ipw_rt->rt_rate = 72;
7727 break;
7728 case IPW_TX_RATE_48MB:
7729 ipw_rt->rt_rate = 96;
7730 break;
7731 case IPW_TX_RATE_54MB:
7732 ipw_rt->rt_rate = 108;
7733 break;
7734 default:
7735 ipw_rt->rt_rate = 0;
7736 break;
7737 }
7738
7739 /* antenna number */
7740 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7741
7742 /* set the preamble flag if we have it */
7743 if ((antennaAndPhy & 64))
7744 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7745
7746 /* Set the size of the skb to the size of the frame */
7747 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7748
7749 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7750
7751 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7752 priv->ieee->stats.rx_errors++;
7753 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7754 rxb->skb = NULL;
7755 /* no LED during capture */
7756 }
7757 }
7758 #endif
7759
7760 #ifdef CONFIG_IPW2200_PROMISCUOUS
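/* Frame-control classification helpers for the promiscuous (rtap)
 * interface; the Rx filter below uses them to decide whether, and how much
 * of, a frame should be passed up. */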
7761 #define ieee80211_is_probe_response(fc) \
7762 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7763 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7764
7765 #define ieee80211_is_management(fc) \
7766 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7767
7768 #define ieee80211_is_control(fc) \
7769 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7770
7771 #define ieee80211_is_data(fc) \
7772 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7773
7774 #define ieee80211_is_assoc_request(fc) \
7775 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7776
7777 #define ieee80211_is_reassoc_request(fc) \
7778 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7779
7780 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7781 struct ipw_rx_mem_buffer *rxb,
7782 struct ieee80211_rx_stats *stats)
7783 {
7784 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7785 struct ipw_rx_frame *frame = &pkt->u.frame;
7786 struct ipw_rt_hdr *ipw_rt;
7787
7788 /* First cache any information we need before we overwrite
7789 * the information provided in the skb from the hardware */
7790 struct ieee80211_hdr *hdr;
7791 u16 channel = frame->received_channel;
7792 u8 phy_flags = frame->antennaAndPhy;
7793 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7794 s8 noise = frame->noise;
7795 u8 rate = frame->rate;
7796 short len = le16_to_cpu(pkt->u.frame.length);
7797 u64 tsf = 0;
7798 struct sk_buff *skb;
7799 int hdr_only = 0;
7800 u16 filter = priv->prom_priv->filter;
7801
7802 /* If the filter is set to not include Rx frames then return */
7803 if (filter & IPW_PROM_NO_RX)
7804 return;
7805
7806 /* We received data from the HW, so stop the watchdog */
7807 priv->prom_net_dev->trans_start = jiffies;
7808
7809 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7810 priv->prom_priv->ieee->stats.rx_errors++;
7811 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7812 return;
7813 }
7814
7815 /* We only process data packets if the interface is open */
7816 if (unlikely(!netif_running(priv->prom_net_dev))) {
7817 priv->prom_priv->ieee->stats.rx_dropped++;
7818 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7819 return;
7820 }
7821
7822 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7823 * that now */
7824 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7825 /* FIXME: Should alloc bigger skb instead */
7826 priv->prom_priv->ieee->stats.rx_dropped++;
7827 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7828 return;
7829 }
7830
7831 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7832 if (ieee80211_is_management(hdr->frame_ctl)) {
7833 if (filter & IPW_PROM_NO_MGMT)
7834 return;
7835 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7836 hdr_only = 1;
7837 } else if (ieee80211_is_control(hdr->frame_ctl)) {
7838 if (filter & IPW_PROM_NO_CTL)
7839 return;
7840 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7841 hdr_only = 1;
7842 } else if (ieee80211_is_data(hdr->frame_ctl)) {
7843 if (filter & IPW_PROM_NO_DATA)
7844 return;
7845 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7846 hdr_only = 1;
7847 }
7848
7849 /* Copy the SKB since this is for the promiscuous side */
7850 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7851 if (skb == NULL) {
7852 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7853 return;
7854 }
7855
7856 /* copy the frame data to write after where the radiotap header goes */
7857 ipw_rt = (void *)skb->data;
7858
7859 if (hdr_only)
7860 len = ieee80211_get_hdrlen(hdr->frame_ctl);
7861
7862 memcpy(ipw_rt->payload, hdr, len);
7863
7864 /* Zero the radiotap static buffer ... We only need to zero the bytes
7865 * NOT part of our real header, saves a little time.
7866 *
7867 * No longer necessary since we fill in all our data. Purge before
7868 * merging patch officially.
7869 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7870 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7871 */
7872
7873 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7874 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7875 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7876
7877 /* Set the size of the skb to the size of the frame */
7878 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7879
7880 /* Big bitfield of all the fields we provide in radiotap */
7881 ipw_rt->rt_hdr.it_present =
7882 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7883 (1 << IEEE80211_RADIOTAP_TSFT) |
7884 (1 << IEEE80211_RADIOTAP_RATE) |
7885 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7886 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7887 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7888 (1 << IEEE80211_RADIOTAP_ANTENNA));
7889
7890 /* Zero the flags, we'll add to them as we go */
7891 ipw_rt->rt_flags = 0;
7892
7893 ipw_rt->rt_tsf = tsf;
7894
7895 /* Convert to DBM */
7896 ipw_rt->rt_dbmsignal = signal;
7897 ipw_rt->rt_dbmnoise = noise;
7898
7899 /* Convert the channel data and set the flags */
7900 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7901 if (channel > 14) { /* 802.11a */
7902 ipw_rt->rt_chbitmask =
7903 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7904 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7905 ipw_rt->rt_chbitmask =
7906 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7907 } else { /* 802.11g */
7908 ipw_rt->rt_chbitmask =
7909 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7910 }
7911
7912 /* set the rate in multiples of 500k/s */
7913 switch (rate) {
7914 case IPW_TX_RATE_1MB:
7915 ipw_rt->rt_rate = 2;
7916 break;
7917 case IPW_TX_RATE_2MB:
7918 ipw_rt->rt_rate = 4;
7919 break;
7920 case IPW_TX_RATE_5MB:
7921 ipw_rt->rt_rate = 10;
7922 break;
7923 case IPW_TX_RATE_6MB:
7924 ipw_rt->rt_rate = 12;
7925 break;
7926 case IPW_TX_RATE_9MB:
7927 ipw_rt->rt_rate = 18;
7928 break;
7929 case IPW_TX_RATE_11MB:
7930 ipw_rt->rt_rate = 22;
7931 break;
7932 case IPW_TX_RATE_12MB:
7933 ipw_rt->rt_rate = 24;
7934 break;
7935 case IPW_TX_RATE_18MB:
7936 ipw_rt->rt_rate = 36;
7937 break;
7938 case IPW_TX_RATE_24MB:
7939 ipw_rt->rt_rate = 48;
7940 break;
7941 case IPW_TX_RATE_36MB:
7942 ipw_rt->rt_rate = 72;
7943 break;
7944 case IPW_TX_RATE_48MB:
7945 ipw_rt->rt_rate = 96;
7946 break;
7947 case IPW_TX_RATE_54MB:
7948 ipw_rt->rt_rate = 108;
7949 break;
7950 default:
7951 ipw_rt->rt_rate = 0;
7952 break;
7953 }
7954
7955 /* antenna number */
7956 ipw_rt->rt_antenna = (phy_flags & 3);
7957
7958 /* set the preamble flag if we have it */
7959 if (phy_flags & (1 << 6))
7960 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7961
7962 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
7963
7964 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
7965 priv->prom_priv->ieee->stats.rx_errors++;
7966 dev_kfree_skb_any(skb);
7967 }
7968 }
7969 #endif
7970
7971 static int is_network_packet(struct ipw_priv *priv,
7972 struct ieee80211_hdr_4addr *header)
7973 {
7974 /* Filter incoming packets to determine if they are targeted toward
7975 * this network, discarding packets coming from ourselves */
7976 switch (priv->ieee->iw_mode) {
7977 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7978 /* packets from our adapter are dropped (echo) */
7979 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7980 return 0;
7981
7982 /* {broad,multi}cast packets to our BSSID go through */
7983 if (is_multicast_ether_addr(header->addr1))
7984 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7985
7986 /* packets to our adapter go through */
7987 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7988 ETH_ALEN);
7989
7990 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7991 /* packets from our adapter are dropped (echo) */
7992 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7993 return 0;
7994
7995 /* {broad,multi}cast packets to our BSS go through */
7996 if (is_multicast_ether_addr(header->addr1))
7997 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7998
7999 /* packets to our adapter go through */
8000 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8001 ETH_ALEN);
8002 }
8003
8004 return 1;
8005 }
8006
8007 #define IPW_PACKET_RETRY_TIME HZ
8008
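/*
 * Detect retransmissions by remembering the last sequence/fragment number
 * seen from each peer: a per-MAC hash table in IBSS mode, a single slot in
 * infrastructure mode.  A frame that repeats the previous sequence number
 * within IPW_PACKET_RETRY_TIME (or arrives as an out-of-order fragment of
 * it) is reported as a duplicate.
 */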
8009 static int is_duplicate_packet(struct ipw_priv *priv,
8010 struct ieee80211_hdr_4addr *header)
8011 {
8012 u16 sc = le16_to_cpu(header->seq_ctl);
8013 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8014 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8015 u16 *last_seq, *last_frag;
8016 unsigned long *last_time;
8017
8018 switch (priv->ieee->iw_mode) {
8019 case IW_MODE_ADHOC:
8020 {
8021 struct list_head *p;
8022 struct ipw_ibss_seq *entry = NULL;
8023 u8 *mac = header->addr2;
8024 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8025
8026 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8027 entry =
8028 list_entry(p, struct ipw_ibss_seq, list);
8029 if (!memcmp(entry->mac, mac, ETH_ALEN))
8030 break;
8031 }
8032 if (p == &priv->ibss_mac_hash[index]) {
8033 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8034 if (!entry) {
8035 IPW_ERROR
8036 ("Cannot malloc new mac entry\n");
8037 return 0;
8038 }
8039 memcpy(entry->mac, mac, ETH_ALEN);
8040 entry->seq_num = seq;
8041 entry->frag_num = frag;
8042 entry->packet_time = jiffies;
8043 list_add(&entry->list,
8044 &priv->ibss_mac_hash[index]);
8045 return 0;
8046 }
8047 last_seq = &entry->seq_num;
8048 last_frag = &entry->frag_num;
8049 last_time = &entry->packet_time;
8050 break;
8051 }
8052 case IW_MODE_INFRA:
8053 last_seq = &priv->last_seq_num;
8054 last_frag = &priv->last_frag_num;
8055 last_time = &priv->last_packet_time;
8056 break;
8057 default:
8058 return 0;
8059 }
8060 if ((*last_seq == seq) &&
8061 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8062 if (*last_frag == frag)
8063 goto drop;
8064 if (*last_frag + 1 != frag)
8065 /* out-of-order fragment */
8066 goto drop;
8067 } else
8068 *last_seq = seq;
8069
8070 *last_frag = frag;
8071 *last_time = jiffies;
8072 return 0;
8073
8074 drop:
8075 /* Comment this line now since we observed the card receives
8076 * duplicate packets but the FCTL_RETRY bit is not set in the
8077 * IBSS mode with fragmentation enabled.
8078 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8079 return 1;
8080 }
8081
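/*
 * Pass management frames to the ieee80211 stack.  In ad-hoc mode, peers are
 * learned from beacons and probe responses carrying our BSSID.  When
 * CFG_NET_STATS is set, a copy of the frame is also pushed to userspace,
 * prefixed with its ieee80211_rx_stats, as an ETH_P_80211_STATS packet.
 */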
8082 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8083 struct ipw_rx_mem_buffer *rxb,
8084 struct ieee80211_rx_stats *stats)
8085 {
8086 struct sk_buff *skb = rxb->skb;
8087 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8088 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8089 (skb->data + IPW_RX_FRAME_SIZE);
8090
8091 ieee80211_rx_mgt(priv->ieee, header, stats);
8092
8093 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8094 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8095 IEEE80211_STYPE_PROBE_RESP) ||
8096 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8097 IEEE80211_STYPE_BEACON))) {
8098 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8099 ipw_add_station(priv, header->addr2);
8100 }
8101
8102 if (priv->config & CFG_NET_STATS) {
8103 IPW_DEBUG_HC("sending stat packet\n");
8104
8105 /* Set the size of the skb to the size of the full
8106 * ipw header and 802.11 frame */
8107 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8108 IPW_RX_FRAME_SIZE);
8109
8110 /* Advance past the ipw packet header to the 802.11 frame */
8111 skb_pull(skb, IPW_RX_FRAME_SIZE);
8112
8113 /* Push the ieee80211_rx_stats before the 802.11 frame */
8114 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8115
8116 skb->dev = priv->ieee->dev;
8117
8118 /* Point raw at the ieee80211_stats */
8119 skb->mac.raw = skb->data;
8120
8121 skb->pkt_type = PACKET_OTHERHOST;
8122 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8123 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8124 netif_rx(skb);
8125 rxb->skb = NULL;
8126 }
8127 }
8128
8129 /*
8130  * Main entry function for receiving a packet with 802.11 headers. This
8131  * should be called whenever the FW has notified us that there is a new
8132  * skb in the receive queue.
8133 */
8134 static void ipw_rx(struct ipw_priv *priv)
8135 {
8136 struct ipw_rx_mem_buffer *rxb;
8137 struct ipw_rx_packet *pkt;
8138 struct ieee80211_hdr_4addr *header;
8139 u32 r, w, i;
8140 u8 network_packet;
8141
8142 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8143 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8144 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8145
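/* Walk the Rx ring from the slot after the last one we processed up to the
 * index reported by the hardware, dispatch each completed buffer by message
 * type, then recycle it onto the rx_used list. */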
8146 while (i != r) {
8147 rxb = priv->rxq->queue[i];
8148 if (unlikely(rxb == NULL)) {
8149 printk(KERN_CRIT "Queue not allocated!\n");
8150 break;
8151 }
8152 priv->rxq->queue[i] = NULL;
8153
8154 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8155 IPW_RX_BUF_SIZE,
8156 PCI_DMA_FROMDEVICE);
8157
8158 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8159 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8160 pkt->header.message_type,
8161 pkt->header.rx_seq_num, pkt->header.control_bits);
8162
8163 switch (pkt->header.message_type) {
8164 case RX_FRAME_TYPE: /* 802.11 frame */ {
8165 struct ieee80211_rx_stats stats = {
8166 .rssi =
8167 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8168 IPW_RSSI_TO_DBM,
8169 .signal =
8170 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8171 IPW_RSSI_TO_DBM + 0x100,
8172 .noise =
8173 le16_to_cpu(pkt->u.frame.noise),
8174 .rate = pkt->u.frame.rate,
8175 .mac_time = jiffies,
8176 .received_channel =
8177 pkt->u.frame.received_channel,
8178 .freq =
8179 (pkt->u.frame.
8180 control & (1 << 0)) ?
8181 IEEE80211_24GHZ_BAND :
8182 IEEE80211_52GHZ_BAND,
8183 .len = le16_to_cpu(pkt->u.frame.length),
8184 };
8185
8186 if (stats.rssi != 0)
8187 stats.mask |= IEEE80211_STATMASK_RSSI;
8188 if (stats.signal != 0)
8189 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8190 if (stats.noise != 0)
8191 stats.mask |= IEEE80211_STATMASK_NOISE;
8192 if (stats.rate != 0)
8193 stats.mask |= IEEE80211_STATMASK_RATE;
8194
8195 priv->rx_packets++;
8196
8197 #ifdef CONFIG_IPW2200_PROMISCUOUS
8198 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8199 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8200 #endif
8201
8202 #ifdef CONFIG_IPW2200_MONITOR
8203 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8204 #ifdef CONFIG_IPW2200_RADIOTAP
8205
8206 ipw_handle_data_packet_monitor(priv,
8207 rxb,
8208 &stats);
8209 #else
8210 ipw_handle_data_packet(priv, rxb,
8211 &stats);
8212 #endif
8213 break;
8214 }
8215 #endif
8216
8217 header =
8218 (struct ieee80211_hdr_4addr *)(rxb->skb->
8219 data +
8220 IPW_RX_FRAME_SIZE);
8221 /* TODO: Check Ad-Hoc dest/source and make sure
8222 * that we are actually parsing these packets
8223 * correctly -- we should probably use the
8224 * frame control of the packet and disregard
8225 * the current iw_mode */
8226
8227 network_packet =
8228 is_network_packet(priv, header);
8229 if (network_packet && priv->assoc_network) {
8230 priv->assoc_network->stats.rssi =
8231 stats.rssi;
8232 priv->exp_avg_rssi =
8233 exponential_average(priv->exp_avg_rssi,
8234 stats.rssi, DEPTH_RSSI);
8235 }
8236
8237 IPW_DEBUG_RX("Frame: len=%u\n",
8238 le16_to_cpu(pkt->u.frame.length));
8239
8240 if (le16_to_cpu(pkt->u.frame.length) <
8241 ieee80211_get_hdrlen(le16_to_cpu(
8242 header->frame_ctl))) {
8243 IPW_DEBUG_DROP
8244 ("Received packet is too small. "
8245 "Dropping.\n");
8246 priv->ieee->stats.rx_errors++;
8247 priv->wstats.discard.misc++;
8248 break;
8249 }
8250
8251 switch (WLAN_FC_GET_TYPE
8252 (le16_to_cpu(header->frame_ctl))) {
8253
8254 case IEEE80211_FTYPE_MGMT:
8255 ipw_handle_mgmt_packet(priv, rxb,
8256 &stats);
8257 break;
8258
8259 case IEEE80211_FTYPE_CTL:
8260 break;
8261
8262 case IEEE80211_FTYPE_DATA:
8263 if (unlikely(!network_packet ||
8264 is_duplicate_packet(priv,
8265 header)))
8266 {
8267 IPW_DEBUG_DROP("Dropping: "
8268 MAC_FMT ", "
8269 MAC_FMT ", "
8270 MAC_FMT "\n",
8271 MAC_ARG(header->
8272 addr1),
8273 MAC_ARG(header->
8274 addr2),
8275 MAC_ARG(header->
8276 addr3));
8277 break;
8278 }
8279
8280 ipw_handle_data_packet(priv, rxb,
8281 &stats);
8282
8283 break;
8284 }
8285 break;
8286 }
8287
8288 case RX_HOST_NOTIFICATION_TYPE:{
8289 IPW_DEBUG_RX
8290 ("Notification: subtype=%02X flags=%02X size=%d\n",
8291 pkt->u.notification.subtype,
8292 pkt->u.notification.flags,
8293 pkt->u.notification.size);
8294 ipw_rx_notification(priv, &pkt->u.notification);
8295 break;
8296 }
8297
8298 default:
8299 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8300 pkt->header.message_type);
8301 break;
8302 }
8303
8304 /* For now we just don't re-use anything. We can tweak this
8305 * later to try and re-use notification packets and SKBs that
8306 * fail to Rx correctly */
8307 if (rxb->skb != NULL) {
8308 dev_kfree_skb_any(rxb->skb);
8309 rxb->skb = NULL;
8310 }
8311
8312 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8313 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8314 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8315
8316 i = (i + 1) % RX_QUEUE_SIZE;
8317 }
8318
8319 /* Backtrack one entry */
8320 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8321
8322 ipw_rx_queue_restock(priv);
8323 }
8324
8325 #define DEFAULT_RTS_THRESHOLD 2304U
8326 #define MIN_RTS_THRESHOLD 1U
8327 #define MAX_RTS_THRESHOLD 2304U
8328 #define DEFAULT_BEACON_INTERVAL 100U
8329 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8330 #define DEFAULT_LONG_RETRY_LIMIT 4U
8331
8332 /**
8333 * ipw_sw_reset
8334 * @option: options to control different reset behaviour
8335 * 0 = reset everything except the 'disable' module_param
8336 * 1 = reset everything and print out driver info (for probe only)
8337 * 2 = reset everything
8338 */
8339 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8340 {
8341 int band, modulation;
8342 int old_mode = priv->ieee->iw_mode;
8343
8344 /* Initialize module parameter values here */
8345 priv->config = 0;
8346
8347 /* We default to disabling the LED code as right now it causes
8348 * too many systems to lock up... */
8349 if (!led)
8350 priv->config |= CFG_NO_LED;
8351
8352 if (associate)
8353 priv->config |= CFG_ASSOCIATE;
8354 else
8355 IPW_DEBUG_INFO("Auto associate disabled.\n");
8356
8357 if (auto_create)
8358 priv->config |= CFG_ADHOC_CREATE;
8359 else
8360 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8361
8362 priv->config &= ~CFG_STATIC_ESSID;
8363 priv->essid_len = 0;
8364 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8365
8366 if (disable && option) {
8367 priv->status |= STATUS_RF_KILL_SW;
8368 IPW_DEBUG_INFO("Radio disabled.\n");
8369 }
8370
8371 if (channel != 0) {
8372 priv->config |= CFG_STATIC_CHANNEL;
8373 priv->channel = channel;
8374 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8375 /* TODO: Validate that provided channel is in range */
8376 }
8377 #ifdef CONFIG_IPW2200_QOS
8378 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8379 burst_duration_CCK, burst_duration_OFDM);
8380 #endif /* CONFIG_IPW2200_QOS */
8381
8382 switch (mode) {
8383 case 1:
8384 priv->ieee->iw_mode = IW_MODE_ADHOC;
8385 priv->net_dev->type = ARPHRD_ETHER;
8386
8387 break;
8388 #ifdef CONFIG_IPW2200_MONITOR
8389 case 2:
8390 priv->ieee->iw_mode = IW_MODE_MONITOR;
8391 #ifdef CONFIG_IPW2200_RADIOTAP
8392 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8393 #else
8394 priv->net_dev->type = ARPHRD_IEEE80211;
8395 #endif
8396 break;
8397 #endif
8398 default:
8399 case 0:
8400 priv->net_dev->type = ARPHRD_ETHER;
8401 priv->ieee->iw_mode = IW_MODE_INFRA;
8402 break;
8403 }
8404
8405 if (hwcrypto) {
8406 priv->ieee->host_encrypt = 0;
8407 priv->ieee->host_encrypt_msdu = 0;
8408 priv->ieee->host_decrypt = 0;
8409 priv->ieee->host_mc_decrypt = 0;
8410 }
8411 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8412
8413 /* The IPW2200/2915 is able to do hardware fragmentation. */
8414 priv->ieee->host_open_frag = 0;
8415
8416 if ((priv->pci_dev->device == 0x4223) ||
8417 (priv->pci_dev->device == 0x4224)) {
8418 if (option == 1)
8419 printk(KERN_INFO DRV_NAME
8420 ": Detected Intel PRO/Wireless 2915ABG Network "
8421 "Connection\n");
8422 priv->ieee->abg_true = 1;
8423 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8424 modulation = IEEE80211_OFDM_MODULATION |
8425 IEEE80211_CCK_MODULATION;
8426 priv->adapter = IPW_2915ABG;
8427 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8428 } else {
8429 if (option == 1)
8430 printk(KERN_INFO DRV_NAME
8431 ": Detected Intel PRO/Wireless 2200BG Network "
8432 "Connection\n");
8433
8434 priv->ieee->abg_true = 0;
8435 band = IEEE80211_24GHZ_BAND;
8436 modulation = IEEE80211_OFDM_MODULATION |
8437 IEEE80211_CCK_MODULATION;
8438 priv->adapter = IPW_2200BG;
8439 priv->ieee->mode = IEEE_G | IEEE_B;
8440 }
8441
8442 priv->ieee->freq_band = band;
8443 priv->ieee->modulation = modulation;
8444
8445 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8446
8447 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8448 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8449
8450 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8451 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8452 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8453
8454 /* If power management is turned on, default to AC mode */
8455 priv->power_mode = IPW_POWER_AC;
8456 priv->tx_power = IPW_TX_POWER_DEFAULT;
8457
8458 return old_mode == priv->ieee->iw_mode;
8459 }
8460
8461 /*
8462 * This file defines the Wireless Extension handlers. It does not
8463 * define any methods of hardware manipulation and relies on the
8464 * functions defined in ipw_main to provide the HW interaction.
8465 *
8466 * The exception to this is the use of the ipw_get_ordinal()
8467 * function used to poll the hardware vs. making unnecessary calls.
8468 *
8469 */
8470
8471 static int ipw_wx_get_name(struct net_device *dev,
8472 struct iw_request_info *info,
8473 union iwreq_data *wrqu, char *extra)
8474 {
8475 struct ipw_priv *priv = ieee80211_priv(dev);
8476 mutex_lock(&priv->mutex);
8477 if (priv->status & STATUS_RF_KILL_MASK)
8478 strcpy(wrqu->name, "radio off");
8479 else if (!(priv->status & STATUS_ASSOCIATED))
8480 strcpy(wrqu->name, "unassociated");
8481 else
8482 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8483 ipw_modes[priv->assoc_request.ieee_mode]);
8484 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8485 mutex_unlock(&priv->mutex);
8486 return 0;
8487 }
8488
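/*
 * Apply a static channel selection (channel == 0 clears it back to ANY) and
 * force a [re]association so the change takes effect.  In monitor mode there
 * is no association to redo, so only abort any scan still in progress on the
 * old channel.
 */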
8489 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8490 {
8491 if (channel == 0) {
8492 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8493 priv->config &= ~CFG_STATIC_CHANNEL;
8494 IPW_DEBUG_ASSOC("Attempting to associate with new "
8495 "parameters.\n");
8496 ipw_associate(priv);
8497 return 0;
8498 }
8499
8500 priv->config |= CFG_STATIC_CHANNEL;
8501
8502 if (priv->channel == channel) {
8503 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8504 channel);
8505 return 0;
8506 }
8507
8508 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8509 priv->channel = channel;
8510
8511 #ifdef CONFIG_IPW2200_MONITOR
8512 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8513 int i;
8514 if (priv->status & STATUS_SCANNING) {
8515 IPW_DEBUG_SCAN("Scan abort triggered due to "
8516 "channel change.\n");
8517 ipw_abort_scan(priv);
8518 }
8519
8520 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8521 udelay(10);
8522
8523 if (priv->status & STATUS_SCANNING)
8524 IPW_DEBUG_SCAN("Still scanning...\n");
8525 else
8526 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8527 1000 - i);
8528
8529 return 0;
8530 }
8531 #endif /* CONFIG_IPW2200_MONITOR */
8532
8533 /* Network configuration changed -- force [re]association */
8534 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8535 if (!ipw_disassociate(priv))
8536 ipw_associate(priv);
8537
8538 return 0;
8539 }
8540
8541 static int ipw_wx_set_freq(struct net_device *dev,
8542 struct iw_request_info *info,
8543 union iwreq_data *wrqu, char *extra)
8544 {
8545 struct ipw_priv *priv = ieee80211_priv(dev);
8546 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8547 struct iw_freq *fwrq = &wrqu->freq;
8548 int ret = 0, i;
8549 u8 channel, flags;
8550 int band;
8551
8552 if (fwrq->m == 0) {
8553 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8554 mutex_lock(&priv->mutex);
8555 ret = ipw_set_channel(priv, 0);
8556 mutex_unlock(&priv->mutex);
8557 return ret;
8558 }
8559 /* if setting by freq convert to channel */
8560 if (fwrq->e == 1) {
8561 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8562 if (channel == 0)
8563 return -EINVAL;
8564 } else
8565 channel = fwrq->m;
8566
8567 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8568 return -EINVAL;
8569
8570 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8571 i = ieee80211_channel_to_index(priv->ieee, channel);
8572 if (i == -1)
8573 return -EINVAL;
8574
8575 flags = (band == IEEE80211_24GHZ_BAND) ?
8576 geo->bg[i].flags : geo->a[i].flags;
8577 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8578 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8579 return -EINVAL;
8580 }
8581 }
8582
8583 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8584 mutex_lock(&priv->mutex);
8585 ret = ipw_set_channel(priv, channel);
8586 mutex_unlock(&priv->mutex);
8587 return ret;
8588 }
8589
8590 static int ipw_wx_get_freq(struct net_device *dev,
8591 struct iw_request_info *info,
8592 union iwreq_data *wrqu, char *extra)
8593 {
8594 struct ipw_priv *priv = ieee80211_priv(dev);
8595
8596 wrqu->freq.e = 0;
8597
8598 /* If we are associated, trying to associate, or have a statically
8599 * configured CHANNEL then return that; otherwise return ANY */
8600 mutex_lock(&priv->mutex);
8601 if (priv->config & CFG_STATIC_CHANNEL ||
8602 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8603 wrqu->freq.m = priv->channel;
8604 else
8605 wrqu->freq.m = 0;
8606
8607 mutex_unlock(&priv->mutex);
8608 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8609 return 0;
8610 }
8611
8612 static int ipw_wx_set_mode(struct net_device *dev,
8613 struct iw_request_info *info,
8614 union iwreq_data *wrqu, char *extra)
8615 {
8616 struct ipw_priv *priv = ieee80211_priv(dev);
8617 int err = 0;
8618
8619 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8620
8621 switch (wrqu->mode) {
8622 #ifdef CONFIG_IPW2200_MONITOR
8623 case IW_MODE_MONITOR:
8624 #endif
8625 case IW_MODE_ADHOC:
8626 case IW_MODE_INFRA:
8627 break;
8628 case IW_MODE_AUTO:
8629 wrqu->mode = IW_MODE_INFRA;
8630 break;
8631 default:
8632 return -EINVAL;
8633 }
8634 if (wrqu->mode == priv->ieee->iw_mode)
8635 return 0;
8636
8637 mutex_lock(&priv->mutex);
8638
8639 ipw_sw_reset(priv, 0);
8640
8641 #ifdef CONFIG_IPW2200_MONITOR
8642 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8643 priv->net_dev->type = ARPHRD_ETHER;
8644
8645 if (wrqu->mode == IW_MODE_MONITOR)
8646 #ifdef CONFIG_IPW2200_RADIOTAP
8647 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8648 #else
8649 priv->net_dev->type = ARPHRD_IEEE80211;
8650 #endif
8651 #endif /* CONFIG_IPW2200_MONITOR */
8652
8653 /* Free the existing firmware and reset the fw_loaded
8654 * flag so ipw_load() will bring in the new firmware */
8655 free_firmware();
8656
8657 priv->ieee->iw_mode = wrqu->mode;
8658
8659 queue_work(priv->workqueue, &priv->adapter_restart);
8660 mutex_unlock(&priv->mutex);
8661 return err;
8662 }
8663
8664 static int ipw_wx_get_mode(struct net_device *dev,
8665 struct iw_request_info *info,
8666 union iwreq_data *wrqu, char *extra)
8667 {
8668 struct ipw_priv *priv = ieee80211_priv(dev);
8669 mutex_lock(&priv->mutex);
8670 wrqu->mode = priv->ieee->iw_mode;
8671 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8672 mutex_unlock(&priv->mutex);
8673 return 0;
8674 }
8675
8676 /* Values are in microseconds */
8677 static const s32 timeout_duration[] = {
8678 350000,
8679 250000,
8680 75000,
8681 37000,
8682 25000,
8683 };
8684
8685 static const s32 period_duration[] = {
8686 400000,
8687 700000,
8688 1000000,
8689 1000000,
8690 1000000
8691 };
8692
8693 static int ipw_wx_get_range(struct net_device *dev,
8694 struct iw_request_info *info,
8695 union iwreq_data *wrqu, char *extra)
8696 {
8697 struct ipw_priv *priv = ieee80211_priv(dev);
8698 struct iw_range *range = (struct iw_range *)extra;
8699 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8700 int i = 0, j;
8701
8702 wrqu->data.length = sizeof(*range);
8703 memset(range, 0, sizeof(*range));
8704
8705 /* 54 Mb/s == ~27 Mb/s real (802.11g) */
8706 range->throughput = 27 * 1000 * 1000;
8707
8708 range->max_qual.qual = 100;
8709 /* TODO: Find real max RSSI and stick here */
8710 range->max_qual.level = 0;
8711 range->max_qual.noise = 0;
8712 range->max_qual.updated = 7; /* Updated all three */
8713
8714 range->avg_qual.qual = 70;
8715 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8716 range->avg_qual.level = 0; /* FIXME to real average level */
8717 range->avg_qual.noise = 0;
8718 range->avg_qual.updated = 7; /* Updated all three */
8719 mutex_lock(&priv->mutex);
8720 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8721
8722 for (i = 0; i < range->num_bitrates; i++)
8723 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8724 500000;
8725
8726 range->max_rts = DEFAULT_RTS_THRESHOLD;
8727 range->min_frag = MIN_FRAG_THRESHOLD;
8728 range->max_frag = MAX_FRAG_THRESHOLD;
8729
8730 range->encoding_size[0] = 5;
8731 range->encoding_size[1] = 13;
8732 range->num_encoding_sizes = 2;
8733 range->max_encoding_tokens = WEP_KEYS;
8734
8735 /* Set the Wireless Extension versions */
8736 range->we_version_compiled = WIRELESS_EXT;
8737 range->we_version_source = 18;
8738
8739 i = 0;
8740 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8741 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8742 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8743 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8744 continue;
8745
8746 range->freq[i].i = geo->bg[j].channel;
8747 range->freq[i].m = geo->bg[j].freq * 100000;
8748 range->freq[i].e = 1;
8749 i++;
8750 }
8751 }
8752
8753 if (priv->ieee->mode & IEEE_A) {
8754 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8755 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8756 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8757 continue;
8758
8759 range->freq[i].i = geo->a[j].channel;
8760 range->freq[i].m = geo->a[j].freq * 100000;
8761 range->freq[i].e = 1;
8762 i++;
8763 }
8764 }
8765
8766 range->num_channels = i;
8767 range->num_frequency = i;
8768
8769 mutex_unlock(&priv->mutex);
8770
8771 /* Event capability (kernel + driver) */
8772 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8773 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8774 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8775 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8776 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8777
8778 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8779 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8780
8781 IPW_DEBUG_WX("GET Range\n");
8782 return 0;
8783 }
8784
8785 static int ipw_wx_set_wap(struct net_device *dev,
8786 struct iw_request_info *info,
8787 union iwreq_data *wrqu, char *extra)
8788 {
8789 struct ipw_priv *priv = ieee80211_priv(dev);
8790
8791 static const unsigned char any[] = {
8792 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8793 };
8794 static const unsigned char off[] = {
8795 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8796 };
8797
8798 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8799 return -EINVAL;
8800 mutex_lock(&priv->mutex);
8801 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8802 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8803 /* we disable mandatory BSSID association */
8804 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8805 priv->config &= ~CFG_STATIC_BSSID;
8806 IPW_DEBUG_ASSOC("Attempting to associate with new "
8807 "parameters.\n");
8808 ipw_associate(priv);
8809 mutex_unlock(&priv->mutex);
8810 return 0;
8811 }
8812
8813 priv->config |= CFG_STATIC_BSSID;
8814 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8815 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8816 mutex_unlock(&priv->mutex);
8817 return 0;
8818 }
8819
8820 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8821 MAC_ARG(wrqu->ap_addr.sa_data));
8822
8823 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8824
8825 /* Network configuration changed -- force [re]association */
8826 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8827 if (!ipw_disassociate(priv))
8828 ipw_associate(priv);
8829
8830 mutex_unlock(&priv->mutex);
8831 return 0;
8832 }
8833
8834 static int ipw_wx_get_wap(struct net_device *dev,
8835 struct iw_request_info *info,
8836 union iwreq_data *wrqu, char *extra)
8837 {
8838 struct ipw_priv *priv = ieee80211_priv(dev);
8839 /* If we are associated, trying to associate, or have a statically
8840 * configured BSSID then return that; otherwise return ANY */
8841 mutex_lock(&priv->mutex);
8842 if (priv->config & CFG_STATIC_BSSID ||
8843 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8844 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8845 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8846 } else
8847 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8848
8849 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8850 MAC_ARG(wrqu->ap_addr.sa_data));
8851 mutex_unlock(&priv->mutex);
8852 return 0;
8853 }
8854
8855 static int ipw_wx_set_essid(struct net_device *dev,
8856 struct iw_request_info *info,
8857 union iwreq_data *wrqu, char *extra)
8858 {
8859 struct ipw_priv *priv = ieee80211_priv(dev);
8860 char *essid = ""; /* ANY */
8861 int length = 0;
8862 mutex_lock(&priv->mutex);
8863 if (wrqu->essid.flags && wrqu->essid.length) {
8864 length = wrqu->essid.length - 1;
8865 essid = extra;
8866 }
8867 if (length == 0) {
8868 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8869 if ((priv->config & CFG_STATIC_ESSID) &&
8870 !(priv->status & (STATUS_ASSOCIATED |
8871 STATUS_ASSOCIATING))) {
8872 IPW_DEBUG_ASSOC("Attempting to associate with new "
8873 "parameters.\n");
8874 priv->config &= ~CFG_STATIC_ESSID;
8875 ipw_associate(priv);
8876 }
8877 mutex_unlock(&priv->mutex);
8878 return 0;
8879 }
8880
8881 length = min(length, IW_ESSID_MAX_SIZE);
8882
8883 priv->config |= CFG_STATIC_ESSID;
8884
8885 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8886 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8887 mutex_unlock(&priv->mutex);
8888 return 0;
8889 }
8890
8891 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8892 length);
8893
8894 priv->essid_len = length;
8895 memcpy(priv->essid, essid, priv->essid_len);
8896
8897 /* Network configuration changed -- force [re]association */
8898 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8899 if (!ipw_disassociate(priv))
8900 ipw_associate(priv);
8901
8902 mutex_unlock(&priv->mutex);
8903 return 0;
8904 }
8905
8906 static int ipw_wx_get_essid(struct net_device *dev,
8907 struct iw_request_info *info,
8908 union iwreq_data *wrqu, char *extra)
8909 {
8910 struct ipw_priv *priv = ieee80211_priv(dev);
8911
8912 /* If we are associated, trying to associate, or have a statically
8913 * configured ESSID then return that; otherwise return ANY */
8914 mutex_lock(&priv->mutex);
8915 if (priv->config & CFG_STATIC_ESSID ||
8916 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8917 IPW_DEBUG_WX("Getting essid: '%s'\n",
8918 escape_essid(priv->essid, priv->essid_len));
8919 memcpy(extra, priv->essid, priv->essid_len);
8920 wrqu->essid.length = priv->essid_len;
8921 wrqu->essid.flags = 1; /* active */
8922 } else {
8923 IPW_DEBUG_WX("Getting essid: ANY\n");
8924 wrqu->essid.length = 0;
8925 wrqu->essid.flags = 0; /* active */
8926 }
8927 mutex_unlock(&priv->mutex);
8928 return 0;
8929 }
8930
8931 static int ipw_wx_set_nick(struct net_device *dev,
8932 struct iw_request_info *info,
8933 union iwreq_data *wrqu, char *extra)
8934 {
8935 struct ipw_priv *priv = ieee80211_priv(dev);
8936
8937 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8938 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8939 return -E2BIG;
8940 mutex_lock(&priv->mutex);
8941 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8942 memset(priv->nick, 0, sizeof(priv->nick));
8943 memcpy(priv->nick, extra, wrqu->data.length);
8944 IPW_DEBUG_TRACE("<<\n");
8945 mutex_unlock(&priv->mutex);
8946 return 0;
8947
8948 }
8949
8950 static int ipw_wx_get_nick(struct net_device *dev,
8951 struct iw_request_info *info,
8952 union iwreq_data *wrqu, char *extra)
8953 {
8954 struct ipw_priv *priv = ieee80211_priv(dev);
8955 IPW_DEBUG_WX("Getting nick\n");
8956 mutex_lock(&priv->mutex);
8957 wrqu->data.length = strlen(priv->nick) + 1;
8958 memcpy(extra, priv->nick, wrqu->data.length);
8959 wrqu->data.flags = 1; /* active */
8960 mutex_unlock(&priv->mutex);
8961 return 0;
8962 }
8963
8964 static int ipw_wx_set_sens(struct net_device *dev,
8965 struct iw_request_info *info,
8966 union iwreq_data *wrqu, char *extra)
8967 {
8968 struct ipw_priv *priv = ieee80211_priv(dev);
8969 int err = 0;
8970
8971 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
8972 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
8973 mutex_lock(&priv->mutex);
8974
8975 if (wrqu->sens.fixed == 0)
8976 {
8977 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8978 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8979 goto out;
8980 }
8981 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
8982 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
8983 err = -EINVAL;
8984 goto out;
8985 }
8986
8987 priv->roaming_threshold = wrqu->sens.value;
8988 priv->disassociate_threshold = 3*wrqu->sens.value;
8989 out:
8990 mutex_unlock(&priv->mutex);
8991 return err;
8992 }
8993
8994 static int ipw_wx_get_sens(struct net_device *dev,
8995 struct iw_request_info *info,
8996 union iwreq_data *wrqu, char *extra)
8997 {
8998 struct ipw_priv *priv = ieee80211_priv(dev);
8999 mutex_lock(&priv->mutex);
9000 wrqu->sens.fixed = 1;
9001 wrqu->sens.value = priv->roaming_threshold;
9002 mutex_unlock(&priv->mutex);
9003
9004 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9005 wrqu->sens.fixed ? "ON" : "OFF", wrqu->sens.value);
9006
9007 return 0;
9008 }
9009
9010 static int ipw_wx_set_rate(struct net_device *dev,
9011 struct iw_request_info *info,
9012 union iwreq_data *wrqu, char *extra)
9013 {
9014 /* TODO: We should use semaphores or locks for access to priv */
9015 struct ipw_priv *priv = ieee80211_priv(dev);
9016 u32 target_rate = wrqu->bitrate.value;
9017 u32 fixed, mask;
9018
9019 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9020 /* value = X, fixed = 1 means only rate X */
9021 /* value = X, fixed = 0 means all rates lower equal X */
9022
9023 if (target_rate == -1) {
9024 fixed = 0;
9025 mask = IEEE80211_DEFAULT_RATES_MASK;
9026 /* Now we should reassociate */
9027 goto apply;
9028 }
9029
9030 mask = 0;
9031 fixed = wrqu->bitrate.fixed;
9032
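/* Walk the supported rates from lowest to highest: with fixed == 0 every
 * rate at or below the requested one is enabled, with fixed == 1 only the
 * exact match is kept. */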
9033 if (target_rate == 1000000 || !fixed)
9034 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9035 if (target_rate == 1000000)
9036 goto apply;
9037
9038 if (target_rate == 2000000 || !fixed)
9039 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9040 if (target_rate == 2000000)
9041 goto apply;
9042
9043 if (target_rate == 5500000 || !fixed)
9044 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9045 if (target_rate == 5500000)
9046 goto apply;
9047
9048 if (target_rate == 6000000 || !fixed)
9049 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9050 if (target_rate == 6000000)
9051 goto apply;
9052
9053 if (target_rate == 9000000 || !fixed)
9054 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9055 if (target_rate == 9000000)
9056 goto apply;
9057
9058 if (target_rate == 11000000 || !fixed)
9059 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9060 if (target_rate == 11000000)
9061 goto apply;
9062
9063 if (target_rate == 12000000 || !fixed)
9064 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9065 if (target_rate == 12000000)
9066 goto apply;
9067
9068 if (target_rate == 18000000 || !fixed)
9069 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9070 if (target_rate == 18000000)
9071 goto apply;
9072
9073 if (target_rate == 24000000 || !fixed)
9074 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9075 if (target_rate == 24000000)
9076 goto apply;
9077
9078 if (target_rate == 36000000 || !fixed)
9079 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9080 if (target_rate == 36000000)
9081 goto apply;
9082
9083 if (target_rate == 48000000 || !fixed)
9084 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9085 if (target_rate == 48000000)
9086 goto apply;
9087
9088 if (target_rate == 54000000 || !fixed)
9089 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9090 if (target_rate == 54000000)
9091 goto apply;
9092
9093 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9094 return -EINVAL;
9095
9096 apply:
9097 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9098 mask, fixed ? "fixed" : "sub-rates");
9099 mutex_lock(&priv->mutex);
9100 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9101 priv->config &= ~CFG_FIXED_RATE;
9102 ipw_set_fixed_rate(priv, priv->ieee->mode);
9103 } else
9104 priv->config |= CFG_FIXED_RATE;
9105
9106 if (priv->rates_mask == mask) {
9107 IPW_DEBUG_WX("Mask set to current mask.\n");
9108 mutex_unlock(&priv->mutex);
9109 return 0;
9110 }
9111
9112 priv->rates_mask = mask;
9113
9114 /* Network configuration changed -- force [re]association */
9115 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9116 if (!ipw_disassociate(priv))
9117 ipw_associate(priv);
9118
9119 mutex_unlock(&priv->mutex);
9120 return 0;
9121 }
9122
9123 static int ipw_wx_get_rate(struct net_device *dev,
9124 struct iw_request_info *info,
9125 union iwreq_data *wrqu, char *extra)
9126 {
9127 struct ipw_priv *priv = ieee80211_priv(dev);
9128 mutex_lock(&priv->mutex);
9129 wrqu->bitrate.value = priv->last_rate;
9130 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9131 mutex_unlock(&priv->mutex);
9132 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9133 return 0;
9134 }
9135
9136 static int ipw_wx_set_rts(struct net_device *dev,
9137 struct iw_request_info *info,
9138 union iwreq_data *wrqu, char *extra)
9139 {
9140 struct ipw_priv *priv = ieee80211_priv(dev);
9141 mutex_lock(&priv->mutex);
9142 if (wrqu->rts.disabled)
9143 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9144 else {
9145 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9146 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9147 mutex_unlock(&priv->mutex);
9148 return -EINVAL;
9149 }
9150 priv->rts_threshold = wrqu->rts.value;
9151 }
9152
9153 ipw_send_rts_threshold(priv, priv->rts_threshold);
9154 mutex_unlock(&priv->mutex);
9155 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9156 return 0;
9157 }
9158
9159 static int ipw_wx_get_rts(struct net_device *dev,
9160 struct iw_request_info *info,
9161 union iwreq_data *wrqu, char *extra)
9162 {
9163 struct ipw_priv *priv = ieee80211_priv(dev);
9164 mutex_lock(&priv->mutex);
9165 wrqu->rts.value = priv->rts_threshold;
9166 wrqu->rts.fixed = 0; /* no auto select */
9167 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9168 mutex_unlock(&priv->mutex);
9169 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9170 return 0;
9171 }
9172
9173 static int ipw_wx_set_txpow(struct net_device *dev,
9174 struct iw_request_info *info,
9175 union iwreq_data *wrqu, char *extra)
9176 {
9177 struct ipw_priv *priv = ieee80211_priv(dev);
9178 int err = 0;
9179
9180 mutex_lock(&priv->mutex);
9181 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9182 err = -EINPROGRESS;
9183 goto out;
9184 }
9185
9186 if (!wrqu->power.fixed)
9187 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9188
9189 if (wrqu->power.flags != IW_TXPOW_DBM) {
9190 err = -EINVAL;
9191 goto out;
9192 }
9193
9194 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9195 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9196 err = -EINVAL;
9197 goto out;
9198 }
9199
9200 priv->tx_power = wrqu->power.value;
9201 err = ipw_set_tx_power(priv);
9202 out:
9203 mutex_unlock(&priv->mutex);
9204 return err;
9205 }
9206
9207 static int ipw_wx_get_txpow(struct net_device *dev,
9208 struct iw_request_info *info,
9209 union iwreq_data *wrqu, char *extra)
9210 {
9211 struct ipw_priv *priv = ieee80211_priv(dev);
9212 mutex_lock(&priv->mutex);
9213 wrqu->power.value = priv->tx_power;
9214 wrqu->power.fixed = 1;
9215 wrqu->power.flags = IW_TXPOW_DBM;
9216 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9217 mutex_unlock(&priv->mutex);
9218
9219 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9220 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9221
9222 return 0;
9223 }
9224
9225 static int ipw_wx_set_frag(struct net_device *dev,
9226 struct iw_request_info *info,
9227 union iwreq_data *wrqu, char *extra)
9228 {
9229 struct ipw_priv *priv = ieee80211_priv(dev);
9230 mutex_lock(&priv->mutex);
9231 if (wrqu->frag.disabled)
9232 priv->ieee->fts = DEFAULT_FTS;
9233 else {
9234 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9235 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9236 mutex_unlock(&priv->mutex);
9237 return -EINVAL;
9238 }
9239
9240 priv->ieee->fts = wrqu->frag.value & ~0x1;
9241 }
9242
9243 ipw_send_frag_threshold(priv, wrqu->frag.value);
9244 mutex_unlock(&priv->mutex);
9245 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9246 return 0;
9247 }
9248
9249 static int ipw_wx_get_frag(struct net_device *dev,
9250 struct iw_request_info *info,
9251 union iwreq_data *wrqu, char *extra)
9252 {
9253 struct ipw_priv *priv = ieee80211_priv(dev);
9254 mutex_lock(&priv->mutex);
9255 wrqu->frag.value = priv->ieee->fts;
9256 wrqu->frag.fixed = 0; /* no auto select */
9257 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9258 mutex_unlock(&priv->mutex);
9259 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9260
9261 return 0;
9262 }
9263
9264 static int ipw_wx_set_retry(struct net_device *dev,
9265 struct iw_request_info *info,
9266 union iwreq_data *wrqu, char *extra)
9267 {
9268 struct ipw_priv *priv = ieee80211_priv(dev);
9269
9270 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9271 return -EINVAL;
9272
9273 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9274 return 0;
9275
9276 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
9277 return -EINVAL;
9278
9279 mutex_lock(&priv->mutex);
9280 if (wrqu->retry.flags & IW_RETRY_MIN)
9281 priv->short_retry_limit = (u8) wrqu->retry.value;
9282 else if (wrqu->retry.flags & IW_RETRY_MAX)
9283 priv->long_retry_limit = (u8) wrqu->retry.value;
9284 else {
9285 priv->short_retry_limit = (u8) wrqu->retry.value;
9286 priv->long_retry_limit = (u8) wrqu->retry.value;
9287 }
9288
9289 ipw_send_retry_limit(priv, priv->short_retry_limit,
9290 priv->long_retry_limit);
9291 mutex_unlock(&priv->mutex);
9292 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9293 priv->short_retry_limit, priv->long_retry_limit);
9294 return 0;
9295 }
9296
9297 static int ipw_wx_get_retry(struct net_device *dev,
9298 struct iw_request_info *info,
9299 union iwreq_data *wrqu, char *extra)
9300 {
9301 struct ipw_priv *priv = ieee80211_priv(dev);
9302
9303 mutex_lock(&priv->mutex);
9304 wrqu->retry.disabled = 0;
9305
9306 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9307 mutex_unlock(&priv->mutex);
9308 return -EINVAL;
9309 }
9310
9311 if (wrqu->retry.flags & IW_RETRY_MAX) {
9312 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
9313 wrqu->retry.value = priv->long_retry_limit;
9314 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
9315 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
9316 wrqu->retry.value = priv->short_retry_limit;
9317 } else {
9318 wrqu->retry.flags = IW_RETRY_LIMIT;
9319 wrqu->retry.value = priv->short_retry_limit;
9320 }
9321 mutex_unlock(&priv->mutex);
9322
9323 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9324
9325 return 0;
9326 }
9327
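/*
 * Issue a one-shot directed scan for the given ESSID: program per-scan-type
 * dwell times, send the SSID to the firmware, add the channel list and fire
 * an extended scan request.  Returns -EAGAIN if a scan is already running,
 * since we must not sleep here (rtnl_lock may be held).
 */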
9328 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9329 int essid_len)
9330 {
9331 struct ipw_scan_request_ext scan;
9332 int err = 0, scan_type;
9333
9334 if (!(priv->status & STATUS_INIT) ||
9335 (priv->status & STATUS_EXIT_PENDING))
9336 return 0;
9337
9338 mutex_lock(&priv->mutex);
9339
9340 if (priv->status & STATUS_RF_KILL_MASK) {
9341 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9342 priv->status |= STATUS_SCAN_PENDING;
9343 goto done;
9344 }
9345
9346 IPW_DEBUG_HC("starting request direct scan!\n");
9347
9348 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9349 /* We should not sleep here; otherwise we will block most
9350 * of the system (for instance, we hold rtnl_lock when we
9351 * get here).
9352 */
9353 err = -EAGAIN;
9354 goto done;
9355 }
9356 memset(&scan, 0, sizeof(scan));
9357
9358 if (priv->config & CFG_SPEED_SCAN)
9359 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9360 cpu_to_le16(30);
9361 else
9362 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9363 cpu_to_le16(20);
9364
9365 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9366 cpu_to_le16(20);
9367 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9368 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9369
9370 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9371
9372 err = ipw_send_ssid(priv, essid, essid_len);
9373 if (err) {
9374 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9375 goto done;
9376 }
9377 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9378
9379 ipw_add_scan_channels(priv, &scan, scan_type);
9380
9381 err = ipw_send_scan_request_ext(priv, &scan);
9382 if (err) {
9383 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9384 goto done;
9385 }
9386
9387 priv->status |= STATUS_SCANNING;
9388
9389 done:
9390 mutex_unlock(&priv->mutex);
9391 return err;
9392 }
9393
9394 static int ipw_wx_set_scan(struct net_device *dev,
9395 struct iw_request_info *info,
9396 union iwreq_data *wrqu, char *extra)
9397 {
9398 struct ipw_priv *priv = ieee80211_priv(dev);
9399 struct iw_scan_req *req = NULL;
9400 if (wrqu->data.length
9401 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9402 req = (struct iw_scan_req *)extra;
9403 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9404 ipw_request_direct_scan(priv, req->essid,
9405 req->essid_len);
9406 return 0;
9407 }
9408 }
9409
9410 IPW_DEBUG_WX("Start scan\n");
9411
9412 queue_work(priv->workqueue, &priv->request_scan);
9413
9414 return 0;
9415 }
9416
9417 static int ipw_wx_get_scan(struct net_device *dev,
9418 struct iw_request_info *info,
9419 union iwreq_data *wrqu, char *extra)
9420 {
9421 struct ipw_priv *priv = ieee80211_priv(dev);
9422 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9423 }
9424
9425 static int ipw_wx_set_encode(struct net_device *dev,
9426 struct iw_request_info *info,
9427 union iwreq_data *wrqu, char *key)
9428 {
9429 struct ipw_priv *priv = ieee80211_priv(dev);
9430 int ret;
9431 u32 cap = priv->capability;
9432
9433 mutex_lock(&priv->mutex);
9434 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9435
9436 /* In IBSS mode, we need to notify the firmware to update
9437 * the beacon info after we changed the capability. */
9438 if (cap != priv->capability &&
9439 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9440 priv->status & STATUS_ASSOCIATED)
9441 ipw_disassociate(priv);
9442
9443 mutex_unlock(&priv->mutex);
9444 return ret;
9445 }
9446
9447 static int ipw_wx_get_encode(struct net_device *dev,
9448 struct iw_request_info *info,
9449 union iwreq_data *wrqu, char *key)
9450 {
9451 struct ipw_priv *priv = ieee80211_priv(dev);
9452 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9453 }
9454
9455 static int ipw_wx_set_power(struct net_device *dev,
9456 struct iw_request_info *info,
9457 union iwreq_data *wrqu, char *extra)
9458 {
9459 struct ipw_priv *priv = ieee80211_priv(dev);
9460 int err;
9461 mutex_lock(&priv->mutex);
9462 if (wrqu->power.disabled) {
9463 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9464 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9465 if (err) {
9466 IPW_DEBUG_WX("failed setting power mode.\n");
9467 mutex_unlock(&priv->mutex);
9468 return err;
9469 }
9470 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9471 mutex_unlock(&priv->mutex);
9472 return 0;
9473 }
9474
9475 switch (wrqu->power.flags & IW_POWER_MODE) {
9476 case IW_POWER_ON: /* If not specified */
9477 case IW_POWER_MODE: /* If set all mask */
9478 case IW_POWER_ALL_R: /* If explicitly set to all */
9479 break;
9480 default: /* Otherwise we don't support it */
9481 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9482 wrqu->power.flags);
9483 mutex_unlock(&priv->mutex);
9484 return -EOPNOTSUPP;
9485 }
9486
9487 /* If the user hasn't specified a power management mode yet, default
9488 * to BATTERY */
9489 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9490 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9491 else
9492 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9493 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9494 if (err) {
9495 IPW_DEBUG_WX("failed setting power mode.\n");
9496 mutex_unlock(&priv->mutex);
9497 return err;
9498 }
9499
9500 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9501 mutex_unlock(&priv->mutex);
9502 return 0;
9503 }
9504
9505 static int ipw_wx_get_power(struct net_device *dev,
9506 struct iw_request_info *info,
9507 union iwreq_data *wrqu, char *extra)
9508 {
9509 struct ipw_priv *priv = ieee80211_priv(dev);
9510 mutex_lock(&priv->mutex);
9511 if (!(priv->power_mode & IPW_POWER_ENABLED))
9512 wrqu->power.disabled = 1;
9513 else
9514 wrqu->power.disabled = 0;
9515
9516 mutex_unlock(&priv->mutex);
9517 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9518
9519 return 0;
9520 }
9521
9522 static int ipw_wx_set_powermode(struct net_device *dev,
9523 struct iw_request_info *info,
9524 union iwreq_data *wrqu, char *extra)
9525 {
9526 struct ipw_priv *priv = ieee80211_priv(dev);
9527 int mode = *(int *)extra;
9528 int err;
9529 mutex_lock(&priv->mutex);
9530 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9531 mode = IPW_POWER_AC;
9532 priv->power_mode = mode;
9533 } else {
9534 priv->power_mode = IPW_POWER_ENABLED | mode;
9535 }
9536
9537 if (priv->power_mode != mode) {
9538 err = ipw_send_power_mode(priv, mode);
9539
9540 if (err) {
9541 IPW_DEBUG_WX("failed setting power mode.\n");
9542 mutex_unlock(&priv->mutex);
9543 return err;
9544 }
9545 }
9546 mutex_unlock(&priv->mutex);
9547 return 0;
9548 }
9549
9550 #define MAX_WX_STRING 80
9551 static int ipw_wx_get_powermode(struct net_device *dev,
9552 struct iw_request_info *info,
9553 union iwreq_data *wrqu, char *extra)
9554 {
9555 struct ipw_priv *priv = ieee80211_priv(dev);
9556 int level = IPW_POWER_LEVEL(priv->power_mode);
9557 char *p = extra;
9558
9559 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9560
9561 switch (level) {
9562 case IPW_POWER_AC:
9563 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9564 break;
9565 case IPW_POWER_BATTERY:
9566 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9567 break;
9568 default:
9569 p += snprintf(p, MAX_WX_STRING - (p - extra),
9570 "(Timeout %dms, Period %dms)",
9571 timeout_duration[level - 1] / 1000,
9572 period_duration[level - 1] / 1000);
9573 }
9574
9575 if (!(priv->power_mode & IPW_POWER_ENABLED))
9576 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9577
9578 wrqu->data.length = p - extra + 1;
9579
9580 return 0;
9581 }
9582
9583 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9584 struct iw_request_info *info,
9585 union iwreq_data *wrqu, char *extra)
9586 {
9587 struct ipw_priv *priv = ieee80211_priv(dev);
9588 int mode = *(int *)extra;
9589 u8 band = 0, modulation = 0;
9590
9591 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9592 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9593 return -EINVAL;
9594 }
9595 mutex_lock(&priv->mutex);
9596 if (priv->adapter == IPW_2915ABG) {
9597 priv->ieee->abg_true = 1;
9598 if (mode & IEEE_A) {
9599 band |= IEEE80211_52GHZ_BAND;
9600 modulation |= IEEE80211_OFDM_MODULATION;
9601 } else
9602 priv->ieee->abg_true = 0;
9603 } else {
9604 if (mode & IEEE_A) {
9605 IPW_WARNING("Attempt to set 2200BG into "
9606 "802.11a mode\n");
9607 mutex_unlock(&priv->mutex);
9608 return -EINVAL;
9609 }
9610
9611 priv->ieee->abg_true = 0;
9612 }
9613
9614 if (mode & IEEE_B) {
9615 band |= IEEE80211_24GHZ_BAND;
9616 modulation |= IEEE80211_CCK_MODULATION;
9617 } else
9618 priv->ieee->abg_true = 0;
9619
9620 if (mode & IEEE_G) {
9621 band |= IEEE80211_24GHZ_BAND;
9622 modulation |= IEEE80211_OFDM_MODULATION;
9623 } else
9624 priv->ieee->abg_true = 0;
9625
9626 priv->ieee->mode = mode;
9627 priv->ieee->freq_band = band;
9628 priv->ieee->modulation = modulation;
9629 init_supported_rates(priv, &priv->rates);
9630
9631 /* Network configuration changed -- force [re]association */
9632 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9633 if (!ipw_disassociate(priv)) {
9634 ipw_send_supported_rates(priv, &priv->rates);
9635 ipw_associate(priv);
9636 }
9637
9638 /* Update the band LEDs */
9639 ipw_led_band_on(priv);
9640
9641 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9642 mode & IEEE_A ? 'a' : '.',
9643 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9644 mutex_unlock(&priv->mutex);
9645 return 0;
9646 }
9647
9648 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9649 struct iw_request_info *info,
9650 union iwreq_data *wrqu, char *extra)
9651 {
9652 struct ipw_priv *priv = ieee80211_priv(dev);
9653 mutex_lock(&priv->mutex);
9654 switch (priv->ieee->mode) {
9655 case IEEE_A:
9656 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9657 break;
9658 case IEEE_B:
9659 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9660 break;
9661 case IEEE_A | IEEE_B:
9662 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9663 break;
9664 case IEEE_G:
9665 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9666 break;
9667 case IEEE_A | IEEE_G:
9668 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9669 break;
9670 case IEEE_B | IEEE_G:
9671 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9672 break;
9673 case IEEE_A | IEEE_B | IEEE_G:
9674 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9675 break;
9676 default:
9677 strncpy(extra, "unknown", MAX_WX_STRING);
9678 break;
9679 }
9680
9681 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9682
9683 wrqu->data.length = strlen(extra) + 1;
9684 mutex_unlock(&priv->mutex);
9685
9686 return 0;
9687 }
9688
9689 static int ipw_wx_set_preamble(struct net_device *dev,
9690 struct iw_request_info *info,
9691 union iwreq_data *wrqu, char *extra)
9692 {
9693 struct ipw_priv *priv = ieee80211_priv(dev);
9694 int mode = *(int *)extra;
9695 mutex_lock(&priv->mutex);
9696 /* Switching from SHORT -> LONG requires a disassociation */
9697 if (mode == 1) {
9698 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9699 priv->config |= CFG_PREAMBLE_LONG;
9700
9701 /* Network configuration changed -- force [re]association */
9702 IPW_DEBUG_ASSOC
9703 ("[re]association triggered due to preamble change.\n");
9704 if (!ipw_disassociate(priv))
9705 ipw_associate(priv);
9706 }
9707 goto done;
9708 }
9709
9710 if (mode == 0) {
9711 priv->config &= ~CFG_PREAMBLE_LONG;
9712 goto done;
9713 }
9714 mutex_unlock(&priv->mutex);
9715 return -EINVAL;
9716
9717 done:
9718 mutex_unlock(&priv->mutex);
9719 return 0;
9720 }
9721
9722 static int ipw_wx_get_preamble(struct net_device *dev,
9723 struct iw_request_info *info,
9724 union iwreq_data *wrqu, char *extra)
9725 {
9726 struct ipw_priv *priv = ieee80211_priv(dev);
9727 mutex_lock(&priv->mutex);
9728 if (priv->config & CFG_PREAMBLE_LONG)
9729 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9730 else
9731 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9732 mutex_unlock(&priv->mutex);
9733 return 0;
9734 }
9735
9736 #ifdef CONFIG_IPW2200_MONITOR
9737 static int ipw_wx_set_monitor(struct net_device *dev,
9738 struct iw_request_info *info,
9739 union iwreq_data *wrqu, char *extra)
9740 {
9741 struct ipw_priv *priv = ieee80211_priv(dev);
9742 int *parms = (int *)extra;
9743 int enable = (parms[0] > 0);
9744 mutex_lock(&priv->mutex);
9745 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9746 if (enable) {
9747 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9748 #ifdef CONFIG_IPW2200_RADIOTAP
9749 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9750 #else
9751 priv->net_dev->type = ARPHRD_IEEE80211;
9752 #endif
9753 queue_work(priv->workqueue, &priv->adapter_restart);
9754 }
9755
9756 ipw_set_channel(priv, parms[1]);
9757 } else {
9758 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9759 mutex_unlock(&priv->mutex);
9760 return 0;
9761 }
9762 priv->net_dev->type = ARPHRD_ETHER;
9763 queue_work(priv->workqueue, &priv->adapter_restart);
9764 }
9765 mutex_unlock(&priv->mutex);
9766 return 0;
9767 }
9768
9769 #endif /* CONFIG_IPW2200_MONITOR */
9770
9771 static int ipw_wx_reset(struct net_device *dev,
9772 struct iw_request_info *info,
9773 union iwreq_data *wrqu, char *extra)
9774 {
9775 struct ipw_priv *priv = ieee80211_priv(dev);
9776 IPW_DEBUG_WX("RESET\n");
9777 queue_work(priv->workqueue, &priv->adapter_restart);
9778 return 0;
9779 }
9780
9781 static int ipw_wx_sw_reset(struct net_device *dev,
9782 struct iw_request_info *info,
9783 union iwreq_data *wrqu, char *extra)
9784 {
9785 struct ipw_priv *priv = ieee80211_priv(dev);
9786 union iwreq_data wrqu_sec = {
9787 .encoding = {
9788 .flags = IW_ENCODE_DISABLED,
9789 },
9790 };
9791 int ret;
9792
9793 IPW_DEBUG_WX("SW_RESET\n");
9794
9795 mutex_lock(&priv->mutex);
9796
9797 ret = ipw_sw_reset(priv, 2);
9798 if (!ret) {
9799 free_firmware();
9800 ipw_adapter_restart(priv);
9801 }
9802
9803 /* The SW reset bit might have been toggled on by the 'disable'
9804 * module parameter, so take appropriate action */
9805 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9806
9807 mutex_unlock(&priv->mutex);
9808 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9809 mutex_lock(&priv->mutex);
9810
9811 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9812 /* Configuration likely changed -- force [re]association */
9813 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9814 "reset.\n");
9815 if (!ipw_disassociate(priv))
9816 ipw_associate(priv);
9817 }
9818
9819 mutex_unlock(&priv->mutex);
9820
9821 return 0;
9822 }
9823
9824 /* Rebase the WE IOCTLs to zero for the handler array */
9825 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
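/* For example, IW_IOCTL(SIOCSIWFREQ) expands to [SIOCSIWFREQ - SIOCSIWCOMMIT],
 * so each standard WE ioctl lands at its offset from SIOCSIWCOMMIT in the
 * handler array below. */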
9826 static iw_handler ipw_wx_handlers[] = {
9827 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9828 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9829 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9830 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9831 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9832 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9833 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9834 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9835 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9836 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9837 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9838 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9839 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9840 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9841 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9842 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9843 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9844 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9845 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9846 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9847 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9848 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9849 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9850 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9851 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9852 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9853 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9854 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9855 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9856 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9857 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9858 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9859 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9860 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9861 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9862 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9863 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9864 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9865 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9866 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9867 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9868 };
9869
9870 enum {
9871 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9872 IPW_PRIV_GET_POWER,
9873 IPW_PRIV_SET_MODE,
9874 IPW_PRIV_GET_MODE,
9875 IPW_PRIV_SET_PREAMBLE,
9876 IPW_PRIV_GET_PREAMBLE,
9877 IPW_PRIV_RESET,
9878 IPW_PRIV_SW_RESET,
9879 #ifdef CONFIG_IPW2200_MONITOR
9880 IPW_PRIV_SET_MONITOR,
9881 #endif
9882 };
9883
9884 static struct iw_priv_args ipw_priv_args[] = {
9885 {
9886 .cmd = IPW_PRIV_SET_POWER,
9887 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9888 .name = "set_power"},
9889 {
9890 .cmd = IPW_PRIV_GET_POWER,
9891 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9892 .name = "get_power"},
9893 {
9894 .cmd = IPW_PRIV_SET_MODE,
9895 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9896 .name = "set_mode"},
9897 {
9898 .cmd = IPW_PRIV_GET_MODE,
9899 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9900 .name = "get_mode"},
9901 {
9902 .cmd = IPW_PRIV_SET_PREAMBLE,
9903 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9904 .name = "set_preamble"},
9905 {
9906 .cmd = IPW_PRIV_GET_PREAMBLE,
9907 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9908 .name = "get_preamble"},
9909 {
9910 IPW_PRIV_RESET,
9911 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9912 {
9913 IPW_PRIV_SW_RESET,
9914 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9915 #ifdef CONFIG_IPW2200_MONITOR
9916 {
9917 IPW_PRIV_SET_MONITOR,
9918 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9919 #endif /* CONFIG_IPW2200_MONITOR */
9920 };
9921
9922 static iw_handler ipw_priv_handler[] = {
9923 ipw_wx_set_powermode,
9924 ipw_wx_get_powermode,
9925 ipw_wx_set_wireless_mode,
9926 ipw_wx_get_wireless_mode,
9927 ipw_wx_set_preamble,
9928 ipw_wx_get_preamble,
9929 ipw_wx_reset,
9930 ipw_wx_sw_reset,
9931 #ifdef CONFIG_IPW2200_MONITOR
9932 ipw_wx_set_monitor,
9933 #endif
9934 };
9935
9936 static struct iw_handler_def ipw_wx_handler_def = {
9937 .standard = ipw_wx_handlers,
9938 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9939 .num_private = ARRAY_SIZE(ipw_priv_handler),
9940 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9941 .private = ipw_priv_handler,
9942 .private_args = ipw_priv_args,
9943 .get_wireless_stats = ipw_get_wireless_stats,
9944 };
9945
9946 /*
9947 * Get wireless statistics.
9948 * Called by /proc/net/wireless
9949 * Also called by SIOCGIWSTATS
9950 */
9951 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9952 {
9953 struct ipw_priv *priv = ieee80211_priv(dev);
9954 struct iw_statistics *wstats;
9955
9956 wstats = &priv->wstats;
9957
9958 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9959 * netdev->get_wireless_stats seems to be called before fw is
9960 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9961 * and associated; if not associated, the values are all meaningless
9962 * anyway, so set them all to 0 and mark them INVALID */
9963 if (!(priv->status & STATUS_ASSOCIATED)) {
9964 wstats->miss.beacon = 0;
9965 wstats->discard.retries = 0;
9966 wstats->qual.qual = 0;
9967 wstats->qual.level = 0;
9968 wstats->qual.noise = 0;
9969 wstats->qual.updated = 7;
9970 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9971 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9972 return wstats;
9973 }
9974
9975 wstats->qual.qual = priv->quality;
9976 wstats->qual.level = priv->exp_avg_rssi;
9977 wstats->qual.noise = priv->exp_avg_noise;
9978 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9979 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9980
9981 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9982 wstats->discard.retries = priv->last_tx_failures;
9983 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9984
9985 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9986 goto fail_get_ordinal;
9987 wstats->discard.retries += tx_retry; */
9988
9989 return wstats;
9990 }
9991
9992 /* net device stuff */
9993
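/*
 * Fill in the firmware system configuration defaults used by ipw_config().
 * Only the antenna diversity setting is taken from the 'antenna' module
 * parameter; out-of-range values fall back to CFG_SYS_ANTENNA_BOTH.
 */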
9994 static void init_sys_config(struct ipw_sys_config *sys_config)
9995 {
9996 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9997 sys_config->bt_coexistence = 0;
9998 sys_config->answer_broadcast_ssid_probe = 0;
9999 sys_config->accept_all_data_frames = 0;
10000 sys_config->accept_non_directed_frames = 1;
10001 sys_config->exclude_unicast_unencrypted = 0;
10002 sys_config->disable_unicast_decryption = 1;
10003 sys_config->exclude_multicast_unencrypted = 0;
10004 sys_config->disable_multicast_decryption = 1;
10005 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10006 antenna = CFG_SYS_ANTENNA_BOTH;
10007 sys_config->antenna_diversity = antenna;
10008 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10009 sys_config->dot11g_auto_detection = 0;
10010 sys_config->enable_cts_to_self = 0;
10011 sys_config->bt_coexist_collision_thr = 0;
10012 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10013 sys_config->silence_threshold = 0x1e;
10014 }
10015
10016 static int ipw_net_open(struct net_device *dev)
10017 {
10018 struct ipw_priv *priv = ieee80211_priv(dev);
10019 IPW_DEBUG_INFO("dev->open\n");
10020 /* we should be verifying the device is ready to be opened */
10021 mutex_lock(&priv->mutex);
10022 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10023 (priv->status & STATUS_ASSOCIATED))
10024 netif_start_queue(dev);
10025 mutex_unlock(&priv->mutex);
10026 return 0;
10027 }
10028
10029 static int ipw_net_stop(struct net_device *dev)
10030 {
10031 IPW_DEBUG_INFO("dev->close\n");
10032 netif_stop_queue(dev);
10033 return 0;
10034 }
10035
10036 /*
10037 TODO:
10038
10039 Modify to send one TFD per fragment instead of using chunking; otherwise
10040 we need to heavily modify ieee80211_skb_to_txb().
10041 */
10042
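/*
 * Map an ieee80211_txb onto a single TFD: select the TX queue, resolve the
 * destination station (IBSS only), fill in the rate/preamble/security flags,
 * DMA-map each fragment as a chunk, then advance the queue write pointer to
 * hand the frame to the firmware.
 */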
10043 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10044 int pri)
10045 {
10046 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10047 txb->fragments[0]->data;
10048 int i = 0;
10049 struct tfd_frame *tfd;
10050 #ifdef CONFIG_IPW2200_QOS
10051 int tx_id = ipw_get_tx_queue_number(priv, pri);
10052 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10053 #else
10054 struct clx2_tx_queue *txq = &priv->txq[0];
10055 #endif
10056 struct clx2_queue *q = &txq->q;
10057 u8 id, hdr_len, unicast;
10058 u16 remaining_bytes;
10059 int fc;
10060
10061 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10062 switch (priv->ieee->iw_mode) {
10063 case IW_MODE_ADHOC:
10064 unicast = !is_multicast_ether_addr(hdr->addr1);
10065 id = ipw_find_station(priv, hdr->addr1);
10066 if (id == IPW_INVALID_STATION) {
10067 id = ipw_add_station(priv, hdr->addr1);
10068 if (id == IPW_INVALID_STATION) {
10069 IPW_WARNING("Attempt to send data to "
10070 "invalid cell: " MAC_FMT "\n",
10071 MAC_ARG(hdr->addr1));
10072 goto drop;
10073 }
10074 }
10075 break;
10076
10077 case IW_MODE_INFRA:
10078 default:
10079 unicast = !is_multicast_ether_addr(hdr->addr3);
10080 id = 0;
10081 break;
10082 }
10083
10084 tfd = &txq->bd[q->first_empty];
10085 txq->txb[q->first_empty] = txb;
10086 memset(tfd, 0, sizeof(*tfd));
10087 tfd->u.data.station_number = id;
10088
10089 tfd->control_flags.message_type = TX_FRAME_TYPE;
10090 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10091
10092 tfd->u.data.cmd_id = DINO_CMD_TX;
10093 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10094 remaining_bytes = txb->payload_size;
10095
10096 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10097 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10098 else
10099 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10100
10101 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10102 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10103
10104 fc = le16_to_cpu(hdr->frame_ctl);
10105 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10106
10107 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10108
10109 if (likely(unicast))
10110 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10111
10112 if (txb->encrypted && !priv->ieee->host_encrypt) {
10113 switch (priv->ieee->sec.level) {
10114 case SEC_LEVEL_3:
10115 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10116 IEEE80211_FCTL_PROTECTED;
10117 /* XXX: ACK flag must be set for CCMP even if it
10118 * is a multicast/broadcast packet, because CCMP
10119 * group communication encrypted by GTK is
10120 * actually done by the AP. */
10121 if (!unicast)
10122 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10123
10124 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10125 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10126 tfd->u.data.key_index = 0;
10127 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10128 break;
10129 case SEC_LEVEL_2:
10130 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10131 IEEE80211_FCTL_PROTECTED;
10132 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10133 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10134 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10135 break;
10136 case SEC_LEVEL_1:
10137 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10138 IEEE80211_FCTL_PROTECTED;
10139 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10140 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10141 40)
10142 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10143 else
10144 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10145 break;
10146 case SEC_LEVEL_0:
10147 break;
10148 default:
10149 printk(KERN_ERR "Unknown security level %d\n",
10150 priv->ieee->sec.level);
10151 break;
10152 }
10153 } else
10154 /* No hardware encryption */
10155 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10156
10157 #ifdef CONFIG_IPW2200_QOS
10158 if (fc & IEEE80211_STYPE_QOS_DATA)
10159 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10160 #endif /* CONFIG_IPW2200_QOS */
10161
10162 /* payload */
10163 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10164 txb->nr_frags));
10165 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10166 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10167 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10168 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10169 i, le32_to_cpu(tfd->u.data.num_chunks),
10170 txb->fragments[i]->len - hdr_len);
10171 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10172 i, tfd->u.data.num_chunks,
10173 txb->fragments[i]->len - hdr_len);
10174 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10175 txb->fragments[i]->len - hdr_len);
10176
10177 tfd->u.data.chunk_ptr[i] =
10178 cpu_to_le32(pci_map_single
10179 (priv->pci_dev,
10180 txb->fragments[i]->data + hdr_len,
10181 txb->fragments[i]->len - hdr_len,
10182 PCI_DMA_TODEVICE));
10183 tfd->u.data.chunk_len[i] =
10184 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10185 }
10186
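/* More fragments than available TFD chunk slots: coalesce the remaining
 * fragment payloads into one freshly allocated skb, append it as the
 * final chunk and bump num_chunks accordingly. */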
10187 if (i != txb->nr_frags) {
10188 struct sk_buff *skb;
10189 u16 remaining_bytes = 0;
10190 int j;
10191
10192 for (j = i; j < txb->nr_frags; j++)
10193 remaining_bytes += txb->fragments[j]->len - hdr_len;
10194
10195 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10196 remaining_bytes);
10197 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10198 if (skb != NULL) {
10199 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10200 for (j = i; j < txb->nr_frags; j++) {
10201 int size = txb->fragments[j]->len - hdr_len;
10202
10203 printk(KERN_INFO "Adding frag %d %d...\n",
10204 j, size);
10205 memcpy(skb_put(skb, size),
10206 txb->fragments[j]->data + hdr_len, size);
10207 }
10208 dev_kfree_skb_any(txb->fragments[i]);
10209 txb->fragments[i] = skb;
10210 tfd->u.data.chunk_ptr[i] =
10211 cpu_to_le32(pci_map_single
10212 (priv->pci_dev, skb->data,
10213 tfd->u.data.chunk_len[i],
10214 PCI_DMA_TODEVICE));
10215
10216 tfd->u.data.num_chunks =
10217 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10218 1);
10219 }
10220 }
10221
10222 /* kick DMA */
10223 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10224 ipw_write32(priv, q->reg_w, q->first_empty);
10225
10226 if (ipw_queue_space(q) < q->high_mark)
10227 netif_stop_queue(priv->net_dev);
10228
10229 return NETDEV_TX_OK;
10230
10231 drop:
10232 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10233 ieee80211_txb_free(txb);
10234 return NETDEV_TX_OK;
10235 }
10236
10237 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10238 {
10239 struct ipw_priv *priv = ieee80211_priv(dev);
10240 #ifdef CONFIG_IPW2200_QOS
10241 int tx_id = ipw_get_tx_queue_number(priv, pri);
10242 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10243 #else
10244 struct clx2_tx_queue *txq = &priv->txq[0];
10245 #endif /* CONFIG_IPW2200_QOS */
10246
10247 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10248 return 1;
10249
10250 return 0;
10251 }
10252
10253 #ifdef CONFIG_IPW2200_PROMISCUOUS
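/*
 * Mirror a transmitted frame onto the promiscuous (rtap) interface: apply
 * the configured prom_priv->filter, then prepend a minimal radiotap header
 * carrying only the channel field before passing each fragment (or just its
 * 802.11 header) to ieee80211_rx() on the rtap device.
 */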
10254 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10255 struct ieee80211_txb *txb)
10256 {
10257 struct ieee80211_rx_stats dummystats;
10258 struct ieee80211_hdr *hdr;
10259 u8 n;
10260 u16 filter = priv->prom_priv->filter;
10261 int hdr_only = 0;
10262
10263 if (filter & IPW_PROM_NO_TX)
10264 return;
10265
10266 memset(&dummystats, 0, sizeof(dummystats));
10267
10268 /* Filtering of fragment chains is done against the first fragment */
10269 hdr = (void *)txb->fragments[0]->data;
10270 if (ieee80211_is_management(hdr->frame_ctl)) {
10271 if (filter & IPW_PROM_NO_MGMT)
10272 return;
10273 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10274 hdr_only = 1;
10275 } else if (ieee80211_is_control(hdr->frame_ctl)) {
10276 if (filter & IPW_PROM_NO_CTL)
10277 return;
10278 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10279 hdr_only = 1;
10280 } else if (ieee80211_is_data(hdr->frame_ctl)) {
10281 if (filter & IPW_PROM_NO_DATA)
10282 return;
10283 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10284 hdr_only = 1;
10285 }
10286
10287 for (n = 0; n < txb->nr_frags; ++n) {
10288 struct sk_buff *src = txb->fragments[n];
10289 struct sk_buff *dst;
10290 struct ieee80211_radiotap_header *rt_hdr;
10291 int len;
10292
10293 if (hdr_only) {
10294 hdr = (void *)src->data;
10295 len = ieee80211_get_hdrlen(hdr->frame_ctl);
10296 } else
10297 len = src->len;
10298
10299 dst = alloc_skb(
10300 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10301 if (!dst) continue;
10302
10303 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10304
10305 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10306 rt_hdr->it_pad = 0;
10307 rt_hdr->it_present = 0; /* after all, it's just an idea */
10308 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10309
10310 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10311 ieee80211chan2mhz(priv->channel));
10312 if (priv->channel > 14) /* 802.11a */
10313 *(u16*)skb_put(dst, sizeof(u16)) =
10314 cpu_to_le16(IEEE80211_CHAN_OFDM |
10315 IEEE80211_CHAN_5GHZ);
10316 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10317 *(u16*)skb_put(dst, sizeof(u16)) =
10318 cpu_to_le16(IEEE80211_CHAN_CCK |
10319 IEEE80211_CHAN_2GHZ);
10320 else /* 802.11g */
10321 *(u16*)skb_put(dst, sizeof(u16)) =
10322 cpu_to_le16(IEEE80211_CHAN_OFDM |
10323 IEEE80211_CHAN_2GHZ);
10324
10325 rt_hdr->it_len = dst->len;
10326
10327 memcpy(skb_put(dst, len), src->data, len);
10328
10329 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10330 dev_kfree_skb_any(dst);
10331 }
10332 }
10333 #endif
10334
10335 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10336 struct net_device *dev, int pri)
10337 {
10338 struct ipw_priv *priv = ieee80211_priv(dev);
10339 unsigned long flags;
10340 int ret;
10341
10342 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10343 spin_lock_irqsave(&priv->lock, flags);
10344
10345 if (!(priv->status & STATUS_ASSOCIATED)) {
10346 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10347 priv->ieee->stats.tx_carrier_errors++;
10348 netif_stop_queue(dev);
10349 goto fail_unlock;
10350 }
10351
10352 #ifdef CONFIG_IPW2200_PROMISCUOUS
10353 if (rtap_iface && netif_running(priv->prom_net_dev))
10354 ipw_handle_promiscuous_tx(priv, txb);
10355 #endif
10356
10357 ret = ipw_tx_skb(priv, txb, pri);
10358 if (ret == NETDEV_TX_OK)
10359 __ipw_led_activity_on(priv);
10360 spin_unlock_irqrestore(&priv->lock, flags);
10361
10362 return ret;
10363
10364 fail_unlock:
10365 spin_unlock_irqrestore(&priv->lock, flags);
10366 return 1;
10367 }
10368
10369 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10370 {
10371 struct ipw_priv *priv = ieee80211_priv(dev);
10372
10373 priv->ieee->stats.tx_packets = priv->tx_packets;
10374 priv->ieee->stats.rx_packets = priv->rx_packets;
10375 return &priv->ieee->stats;
10376 }
10377
10378 static void ipw_net_set_multicast_list(struct net_device *dev)
10379 {
10380
10381 }
10382
10383 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10384 {
10385 struct ipw_priv *priv = ieee80211_priv(dev);
10386 struct sockaddr *addr = p;
10387 if (!is_valid_ether_addr(addr->sa_data))
10388 return -EADDRNOTAVAIL;
10389 mutex_lock(&priv->mutex);
10390 priv->config |= CFG_CUSTOM_MAC;
10391 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10392 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
10393 priv->net_dev->name, MAC_ARG(priv->mac_addr));
10394 queue_work(priv->workqueue, &priv->adapter_restart);
10395 mutex_unlock(&priv->mutex);
10396 return 0;
10397 }
10398
10399 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10400 struct ethtool_drvinfo *info)
10401 {
10402 struct ipw_priv *p = ieee80211_priv(dev);
10403 char vers[64];
10404 char date[32];
10405 u32 len;
10406
10407 strcpy(info->driver, DRV_NAME);
10408 strcpy(info->version, DRV_VERSION);
10409
10410 len = sizeof(vers);
10411 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10412 len = sizeof(date);
10413 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10414
10415 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10416 vers, date);
10417 strcpy(info->bus_info, pci_name(p->pci_dev));
10418 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10419 }
10420
10421 static u32 ipw_ethtool_get_link(struct net_device *dev)
10422 {
10423 struct ipw_priv *priv = ieee80211_priv(dev);
10424 return (priv->status & STATUS_ASSOCIATED) != 0;
10425 }
10426
10427 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10428 {
10429 return IPW_EEPROM_IMAGE_SIZE;
10430 }
10431
10432 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10433 struct ethtool_eeprom *eeprom, u8 * bytes)
10434 {
10435 struct ipw_priv *p = ieee80211_priv(dev);
10436
10437 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10438 return -EINVAL;
10439 mutex_lock(&p->mutex);
10440 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10441 mutex_unlock(&p->mutex);
10442 return 0;
10443 }
10444
10445 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10446 struct ethtool_eeprom *eeprom, u8 * bytes)
10447 {
10448 struct ipw_priv *p = ieee80211_priv(dev);
10449 int i;
10450
10451 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10452 return -EINVAL;
10453 mutex_lock(&p->mutex);
10454 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10455 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10456 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10457 mutex_unlock(&p->mutex);
10458 return 0;
10459 }
10460
10461 static struct ethtool_ops ipw_ethtool_ops = {
10462 .get_link = ipw_ethtool_get_link,
10463 .get_drvinfo = ipw_ethtool_get_drvinfo,
10464 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10465 .get_eeprom = ipw_ethtool_get_eeprom,
10466 .set_eeprom = ipw_ethtool_set_eeprom,
10467 };
10468
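/*
 * Primary interrupt handler.  Reads and acks INTA, masks further interrupts,
 * caches the interrupt status for the tasklet and defers the real work to
 * ipw_irq_tasklet.  Returns IRQ_NONE for spurious/shared-IRQ invocations or
 * when the hardware has disappeared (INTA reads back as 0xFFFFFFFF).
 */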
10469 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
10470 {
10471 struct ipw_priv *priv = data;
10472 u32 inta, inta_mask;
10473
10474 if (!priv)
10475 return IRQ_NONE;
10476
10477 spin_lock(&priv->irq_lock);
10478
10479 if (!(priv->status & STATUS_INT_ENABLED)) {
10480 /* Shared IRQ */
10481 goto none;
10482 }
10483
10484 inta = ipw_read32(priv, IPW_INTA_RW);
10485 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10486
10487 if (inta == 0xFFFFFFFF) {
10488 /* Hardware disappeared */
10489 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10490 goto none;
10491 }
10492
10493 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10494 /* Shared interrupt */
10495 goto none;
10496 }
10497
10498 /* tell the device to stop sending interrupts */
10499 __ipw_disable_interrupts(priv);
10500
10501 /* ack current interrupts */
10502 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10503 ipw_write32(priv, IPW_INTA_RW, inta);
10504
10505 /* Cache INTA value for our tasklet */
10506 priv->isr_inta = inta;
10507
10508 tasklet_schedule(&priv->irq_tasklet);
10509
10510 spin_unlock(&priv->irq_lock);
10511
10512 return IRQ_HANDLED;
10513 none:
10514 spin_unlock(&priv->irq_lock);
10515 return IRQ_NONE;
10516 }
10517
10518 static void ipw_rf_kill(void *adapter)
10519 {
10520 struct ipw_priv *priv = adapter;
10521 unsigned long flags;
10522
10523 spin_lock_irqsave(&priv->lock, flags);
10524
10525 if (rf_kill_active(priv)) {
10526 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10527 if (priv->workqueue)
10528 queue_delayed_work(priv->workqueue,
10529 &priv->rf_kill, 2 * HZ);
10530 goto exit_unlock;
10531 }
10532
10533 /* RF Kill is now disabled, so bring the device back up */
10534
10535 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10536 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10537 "device\n");
10538
10539 /* we cannot do an adapter restart while inside an irq lock */
10540 queue_work(priv->workqueue, &priv->adapter_restart);
10541 } else
10542 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10543 "enabled\n");
10544
10545 exit_unlock:
10546 spin_unlock_irqrestore(&priv->lock, flags);
10547 }
10548
10549 static void ipw_bg_rf_kill(void *data)
10550 {
10551 struct ipw_priv *priv = data;
10552 mutex_lock(&priv->mutex);
10553 ipw_rf_kill(data);
10554 mutex_unlock(&priv->mutex);
10555 }
10556
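/*
 * Association completed: restart the netif queue, cancel any pending scan,
 * reset the link statistics, refresh the current TX rate and, if background
 * scanning is configured, schedule the next scan.
 */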
10557 static void ipw_link_up(struct ipw_priv *priv)
10558 {
10559 priv->last_seq_num = -1;
10560 priv->last_frag_num = -1;
10561 priv->last_packet_time = 0;
10562
10563 netif_carrier_on(priv->net_dev);
10564 if (netif_queue_stopped(priv->net_dev)) {
10565 IPW_DEBUG_NOTIF("waking queue\n");
10566 netif_wake_queue(priv->net_dev);
10567 } else {
10568 IPW_DEBUG_NOTIF("starting queue\n");
10569 netif_start_queue(priv->net_dev);
10570 }
10571
10572 cancel_delayed_work(&priv->request_scan);
10573 ipw_reset_stats(priv);
10574 /* Ensure the rate is updated immediately */
10575 priv->last_rate = ipw_get_current_rate(priv);
10576 ipw_gather_stats(priv);
10577 ipw_led_link_up(priv);
10578 notify_wx_assoc_event(priv);
10579
10580 if (priv->config & CFG_BACKGROUND_SCAN)
10581 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10582 }
10583
10584 static void ipw_bg_link_up(void *data)
10585 {
10586 struct ipw_priv *priv = data;
10587 mutex_lock(&priv->mutex);
10588 ipw_link_up(data);
10589 mutex_unlock(&priv->mutex);
10590 }
10591
10592 static void ipw_link_down(struct ipw_priv *priv)
10593 {
10594 ipw_led_link_down(priv);
10595 netif_carrier_off(priv->net_dev);
10596 netif_stop_queue(priv->net_dev);
10597 notify_wx_assoc_event(priv);
10598
10599 /* Cancel any queued work ... */
10600 cancel_delayed_work(&priv->request_scan);
10601 cancel_delayed_work(&priv->adhoc_check);
10602 cancel_delayed_work(&priv->gather_stats);
10603
10604 ipw_reset_stats(priv);
10605
10606 if (!(priv->status & STATUS_EXIT_PENDING)) {
10607 /* Queue up another scan... */
10608 queue_work(priv->workqueue, &priv->request_scan);
10609 }
10610 }
10611
10612 static void ipw_bg_link_down(void *data)
10613 {
10614 struct ipw_priv *priv = data;
10615 mutex_lock(&priv->mutex);
10616 ipw_link_down(data);
10617 mutex_unlock(&priv->mutex);
10618 }
10619
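/*
 * Create the driver workqueue and register all deferred work items plus the
 * interrupt tasklet.  The ipw_bg_* wrappers generally just take priv->mutex
 * around the corresponding worker (see ipw_bg_rf_kill above).
 */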
10620 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10621 {
10622 int ret = 0;
10623
10624 priv->workqueue = create_workqueue(DRV_NAME);
10625 init_waitqueue_head(&priv->wait_command_queue);
10626 init_waitqueue_head(&priv->wait_state);
10627
10628 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10629 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10630 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10631 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10632 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10633 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10634 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10635 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10636 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10637 INIT_WORK(&priv->request_scan,
10638 (void (*)(void *))ipw_request_scan, priv);
10639 INIT_WORK(&priv->gather_stats,
10640 (void (*)(void *))ipw_bg_gather_stats, priv);
10641 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10642 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10643 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10644 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10645 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10646 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10647 priv);
10648 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10649 priv);
10650 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10651 priv);
10652 INIT_WORK(&priv->merge_networks,
10653 (void (*)(void *))ipw_merge_adhoc_network, priv);
10654
10655 #ifdef CONFIG_IPW2200_QOS
10656 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10657 priv);
10658 #endif /* CONFIG_IPW2200_QOS */
10659
10660 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10661 ipw_irq_tasklet, (unsigned long)priv);
10662
10663 return ret;
10664 }
10665
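/*
 * ieee80211 set_security callback: copy the supplied key material, active
 * key, auth mode and security level into our ieee80211 state, set
 * STATUS_SECURITY_UPDATED, and push keys to the firmware when host
 * encryption is not in use.
 */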
10666 static void shim__set_security(struct net_device *dev,
10667 struct ieee80211_security *sec)
10668 {
10669 struct ipw_priv *priv = ieee80211_priv(dev);
10670 int i;
10671 for (i = 0; i < 4; i++) {
10672 if (sec->flags & (1 << i)) {
10673 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10674 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10675 if (sec->key_sizes[i] == 0)
10676 priv->ieee->sec.flags &= ~(1 << i);
10677 else {
10678 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10679 sec->key_sizes[i]);
10680 priv->ieee->sec.flags |= (1 << i);
10681 }
10682 priv->status |= STATUS_SECURITY_UPDATED;
10683 } else if (sec->level != SEC_LEVEL_1)
10684 priv->ieee->sec.flags &= ~(1 << i);
10685 }
10686
10687 if (sec->flags & SEC_ACTIVE_KEY) {
10688 if (sec->active_key <= 3) {
10689 priv->ieee->sec.active_key = sec->active_key;
10690 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10691 } else
10692 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10693 priv->status |= STATUS_SECURITY_UPDATED;
10694 } else
10695 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10696
10697 if ((sec->flags & SEC_AUTH_MODE) &&
10698 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10699 priv->ieee->sec.auth_mode = sec->auth_mode;
10700 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10701 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10702 priv->capability |= CAP_SHARED_KEY;
10703 else
10704 priv->capability &= ~CAP_SHARED_KEY;
10705 priv->status |= STATUS_SECURITY_UPDATED;
10706 }
10707
10708 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10709 priv->ieee->sec.flags |= SEC_ENABLED;
10710 priv->ieee->sec.enabled = sec->enabled;
10711 priv->status |= STATUS_SECURITY_UPDATED;
10712 if (sec->enabled)
10713 priv->capability |= CAP_PRIVACY_ON;
10714 else
10715 priv->capability &= ~CAP_PRIVACY_ON;
10716 }
10717
10718 if (sec->flags & SEC_ENCRYPT)
10719 priv->ieee->sec.encrypt = sec->encrypt;
10720
10721 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10722 priv->ieee->sec.level = sec->level;
10723 priv->ieee->sec.flags |= SEC_LEVEL;
10724 priv->status |= STATUS_SECURITY_UPDATED;
10725 }
10726
10727 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10728 ipw_set_hwcrypto_keys(priv);
10729
10730 /* To match the current functionality of ipw2100 (which works well with
10731 * various supplicants), we don't force a disassociate if the
10732 * privacy capability changes ... */
10733 #if 0
10734 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10735 (((priv->assoc_request.capability &
10736 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10737 (!(priv->assoc_request.capability &
10738 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10739 IPW_DEBUG_ASSOC("Disassociating due to capability "
10740 "change.\n");
10741 ipw_disassociate(priv);
10742 }
10743 #endif
10744 }
10745
10746 static int init_supported_rates(struct ipw_priv *priv,
10747 struct ipw_supported_rates *rates)
10748 {
10749 /* TODO: Mask out rates based on priv->rates_mask */
10750
10751 memset(rates, 0, sizeof(*rates));
10752 /* configure supported rates */
10753 switch (priv->ieee->freq_band) {
10754 case IEEE80211_52GHZ_BAND:
10755 rates->ieee_mode = IPW_A_MODE;
10756 rates->purpose = IPW_RATE_CAPABILITIES;
10757 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10758 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10759 break;
10760
10761 default: /* Mixed or 2.4GHz */
10762 rates->ieee_mode = IPW_G_MODE;
10763 rates->purpose = IPW_RATE_CAPABILITIES;
10764 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10765 IEEE80211_CCK_DEFAULT_RATES_MASK);
10766 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10767 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10768 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10769 }
10770 break;
10771 }
10772
10773 return 0;
10774 }
10775
10776 static int ipw_config(struct ipw_priv *priv)
10777 {
10778 /* This is only called from ipw_up, which resets/reloads the firmware,
10779 so we don't need to first disable the card before we configure
10780 it */
10781 if (ipw_set_tx_power(priv))
10782 goto error;
10783
10784 /* initialize adapter address */
10785 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10786 goto error;
10787
10788 /* set basic system config settings */
10789 init_sys_config(&priv->sys_config);
10790
10791 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10792 * Does not support BT priority yet (don't abort or defer our Tx) */
10793 if (bt_coexist) {
10794 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10795
10796 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10797 priv->sys_config.bt_coexistence
10798 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10799 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10800 priv->sys_config.bt_coexistence
10801 |= CFG_BT_COEXISTENCE_OOB;
10802 }
10803
10804 #ifdef CONFIG_IPW2200_PROMISCUOUS
10805 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10806 priv->sys_config.accept_all_data_frames = 1;
10807 priv->sys_config.accept_non_directed_frames = 1;
10808 priv->sys_config.accept_all_mgmt_bcpr = 1;
10809 priv->sys_config.accept_all_mgmt_frames = 1;
10810 }
10811 #endif
10812
10813 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10814 priv->sys_config.answer_broadcast_ssid_probe = 1;
10815 else
10816 priv->sys_config.answer_broadcast_ssid_probe = 0;
10817
10818 if (ipw_send_system_config(priv))
10819 goto error;
10820
10821 init_supported_rates(priv, &priv->rates);
10822 if (ipw_send_supported_rates(priv, &priv->rates))
10823 goto error;
10824
10825 /* Set request-to-send threshold */
10826 if (priv->rts_threshold) {
10827 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10828 goto error;
10829 }
10830 #ifdef CONFIG_IPW2200_QOS
10831 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10832 ipw_qos_activate(priv, NULL);
10833 #endif /* CONFIG_IPW2200_QOS */
10834
10835 if (ipw_set_random_seed(priv))
10836 goto error;
10837
10838 /* final state transition to the RUN state */
10839 if (ipw_send_host_complete(priv))
10840 goto error;
10841
10842 priv->status |= STATUS_INIT;
10843
10844 ipw_led_init(priv);
10845 ipw_led_radio_on(priv);
10846 priv->notif_missed_beacons = 0;
10847
10848 /* Set hardware WEP key if it is configured. */
10849 if ((priv->capability & CAP_PRIVACY_ON) &&
10850 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10851 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10852 ipw_set_hwcrypto_keys(priv);
10853
10854 return 0;
10855
10856 error:
10857 return -EIO;
10858 }
10859
10860 /*
10861 * NOTE:
10862 *
10863 * These tables have been tested in conjunction with the
10864 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10865 *
10866 * Altering these values, using them on other hardware, or using them in
10867 * geographies not intended for resale of the above-mentioned Intel
10868 * adapters has not been tested.
10869 *
10870 * Remember to update the table in README.ipw2200 when changing this
10871 * table.
10872 *
10873 */
10874 static const struct ieee80211_geo ipw_geos[] = {
10875 { /* Restricted */
10876 "---",
10877 .bg_channels = 11,
10878 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10879 {2427, 4}, {2432, 5}, {2437, 6},
10880 {2442, 7}, {2447, 8}, {2452, 9},
10881 {2457, 10}, {2462, 11}},
10882 },
10883
10884 { /* Custom US/Canada */
10885 "ZZF",
10886 .bg_channels = 11,
10887 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10888 {2427, 4}, {2432, 5}, {2437, 6},
10889 {2442, 7}, {2447, 8}, {2452, 9},
10890 {2457, 10}, {2462, 11}},
10891 .a_channels = 8,
10892 .a = {{5180, 36},
10893 {5200, 40},
10894 {5220, 44},
10895 {5240, 48},
10896 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10897 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10898 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10899 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10900 },
10901
10902 { /* Rest of World */
10903 "ZZD",
10904 .bg_channels = 13,
10905 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10906 {2427, 4}, {2432, 5}, {2437, 6},
10907 {2442, 7}, {2447, 8}, {2452, 9},
10908 {2457, 10}, {2462, 11}, {2467, 12},
10909 {2472, 13}},
10910 },
10911
10912 { /* Custom USA & Europe & High */
10913 "ZZA",
10914 .bg_channels = 11,
10915 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10916 {2427, 4}, {2432, 5}, {2437, 6},
10917 {2442, 7}, {2447, 8}, {2452, 9},
10918 {2457, 10}, {2462, 11}},
10919 .a_channels = 13,
10920 .a = {{5180, 36},
10921 {5200, 40},
10922 {5220, 44},
10923 {5240, 48},
10924 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10925 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10926 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10927 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10928 {5745, 149},
10929 {5765, 153},
10930 {5785, 157},
10931 {5805, 161},
10932 {5825, 165}},
10933 },
10934
10935 { /* Custom NA & Europe */
10936 "ZZB",
10937 .bg_channels = 11,
10938 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10939 {2427, 4}, {2432, 5}, {2437, 6},
10940 {2442, 7}, {2447, 8}, {2452, 9},
10941 {2457, 10}, {2462, 11}},
10942 .a_channels = 13,
10943 .a = {{5180, 36},
10944 {5200, 40},
10945 {5220, 44},
10946 {5240, 48},
10947 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10948 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10949 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10950 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10951 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10952 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10953 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10954 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10955 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10956 },
10957
10958 { /* Custom Japan */
10959 "ZZC",
10960 .bg_channels = 11,
10961 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10962 {2427, 4}, {2432, 5}, {2437, 6},
10963 {2442, 7}, {2447, 8}, {2452, 9},
10964 {2457, 10}, {2462, 11}},
10965 .a_channels = 4,
10966 .a = {{5170, 34}, {5190, 38},
10967 {5210, 42}, {5230, 46}},
10968 },
10969
10970 { /* Custom */
10971 "ZZM",
10972 .bg_channels = 11,
10973 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10974 {2427, 4}, {2432, 5}, {2437, 6},
10975 {2442, 7}, {2447, 8}, {2452, 9},
10976 {2457, 10}, {2462, 11}},
10977 },
10978
10979 { /* Europe */
10980 "ZZE",
10981 .bg_channels = 13,
10982 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10983 {2427, 4}, {2432, 5}, {2437, 6},
10984 {2442, 7}, {2447, 8}, {2452, 9},
10985 {2457, 10}, {2462, 11}, {2467, 12},
10986 {2472, 13}},
10987 .a_channels = 19,
10988 .a = {{5180, 36},
10989 {5200, 40},
10990 {5220, 44},
10991 {5240, 48},
10992 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10993 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10994 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10995 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10996 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10997 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10998 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10999 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11000 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11001 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11002 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11003 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11004 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11005 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11006 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11007 },
11008
11009 { /* Custom Japan */
11010 "ZZJ",
11011 .bg_channels = 14,
11012 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11013 {2427, 4}, {2432, 5}, {2437, 6},
11014 {2442, 7}, {2447, 8}, {2452, 9},
11015 {2457, 10}, {2462, 11}, {2467, 12},
11016 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11017 .a_channels = 4,
11018 .a = {{5170, 34}, {5190, 38},
11019 {5210, 42}, {5230, 46}},
11020 },
11021
11022 { /* Rest of World */
11023 "ZZR",
11024 .bg_channels = 14,
11025 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11026 {2427, 4}, {2432, 5}, {2437, 6},
11027 {2442, 7}, {2447, 8}, {2452, 9},
11028 {2457, 10}, {2462, 11}, {2467, 12},
11029 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11030 IEEE80211_CH_PASSIVE_ONLY}},
11031 },
11032
11033 { /* High Band */
11034 "ZZH",
11035 .bg_channels = 13,
11036 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11037 {2427, 4}, {2432, 5}, {2437, 6},
11038 {2442, 7}, {2447, 8}, {2452, 9},
11039 {2457, 10}, {2462, 11},
11040 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11041 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11042 .a_channels = 4,
11043 .a = {{5745, 149}, {5765, 153},
11044 {5785, 157}, {5805, 161}},
11045 },
11046
11047 { /* Custom Europe */
11048 "ZZG",
11049 .bg_channels = 13,
11050 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11051 {2427, 4}, {2432, 5}, {2437, 6},
11052 {2442, 7}, {2447, 8}, {2452, 9},
11053 {2457, 10}, {2462, 11},
11054 {2467, 12}, {2472, 13}},
11055 .a_channels = 4,
11056 .a = {{5180, 36}, {5200, 40},
11057 {5220, 44}, {5240, 48}},
11058 },
11059
11060 { /* Europe */
11061 "ZZK",
11062 .bg_channels = 13,
11063 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11064 {2427, 4}, {2432, 5}, {2437, 6},
11065 {2442, 7}, {2447, 8}, {2452, 9},
11066 {2457, 10}, {2462, 11},
11067 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11068 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11069 .a_channels = 24,
11070 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11071 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11072 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11073 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11074 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11075 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11076 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11077 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11078 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11079 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11080 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11081 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11082 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11083 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11084 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11085 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11086 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11087 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11088 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11089 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11090 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11091 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11092 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11093 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11094 },
11095
11096 { /* Europe */
11097 "ZZL",
11098 .bg_channels = 11,
11099 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11100 {2427, 4}, {2432, 5}, {2437, 6},
11101 {2442, 7}, {2447, 8}, {2452, 9},
11102 {2457, 10}, {2462, 11}},
11103 .a_channels = 13,
11104 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11105 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11106 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11107 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11108 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11109 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11110 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11111 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11112 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11113 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11114 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11115 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11116 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11117 }
11118 };
11119
11120 #define MAX_HW_RESTARTS 5
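/*
 * Bring the device up: load microcode/firmware/eeprom, select the regulatory
 * geography from the EEPROM country code, honour software and hardware RF
 * kill, then configure the device, retrying up to MAX_HW_RESTARTS times.
 */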
11121 static int ipw_up(struct ipw_priv *priv)
11122 {
11123 int rc, i, j;
11124
11125 if (priv->status & STATUS_EXIT_PENDING)
11126 return -EIO;
11127
11128 if (cmdlog && !priv->cmdlog) {
11129 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
11130 GFP_KERNEL);
11131 if (priv->cmdlog == NULL) {
11132 IPW_ERROR("Error allocating %d command log entries.\n",
11133 cmdlog);
11134 return -ENOMEM;
11135 } else {
11136 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
11137 priv->cmdlog_len = cmdlog;
11138 }
11139 }
11140
11141 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11142 /* Load the microcode, firmware, and eeprom.
11143 * Also start the clocks. */
11144 rc = ipw_load(priv);
11145 if (rc) {
11146 IPW_ERROR("Unable to load firmware: %d\n", rc);
11147 return rc;
11148 }
11149
11150 ipw_init_ordinals(priv);
11151 if (!(priv->config & CFG_CUSTOM_MAC))
11152 eeprom_parse_mac(priv, priv->mac_addr);
11153 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11154
11155 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11156 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11157 ipw_geos[j].name, 3))
11158 break;
11159 }
11160 if (j == ARRAY_SIZE(ipw_geos)) {
11161 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11162 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11163 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11164 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11165 j = 0;
11166 }
11167 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11168 IPW_WARNING("Could not set geography.\n");
11169 return 0;
11170 }
11171
11172 if (priv->status & STATUS_RF_KILL_SW) {
11173 IPW_WARNING("Radio disabled by module parameter.\n");
11174 return 0;
11175 } else if (rf_kill_active(priv)) {
11176 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11177 "Kill switch must be turned off for "
11178 "wireless networking to work.\n");
11179 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11180 2 * HZ);
11181 return 0;
11182 }
11183
11184 rc = ipw_config(priv);
11185 if (!rc) {
11186 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11187
11188 /* If configured to auto-associate, kick
11189 * off a scan. */
11190 queue_work(priv->workqueue, &priv->request_scan);
11191
11192 return 0;
11193 }
11194
11195 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11196 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11197 i, MAX_HW_RESTARTS);
11198
11199 /* We had an error bringing up the hardware, so take it
11200 * all the way back down so we can try again */
11201 ipw_down(priv);
11202 }
11203
11204 /* We tried to restart and configure the device for as long as our
11205 * patience could withstand */
11206 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11207
11208 return -EIO;
11209 }
11210
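/* Workqueue wrapper: serializes ipw_up() behind priv->mutex when it is
 * invoked from deferred work rather than directly. */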
11211 static void ipw_bg_up(void *data)
11212 {
11213 struct ipw_priv *priv = data;
11214 mutex_lock(&priv->mutex);
11215 ipw_up(data);
11216 mutex_unlock(&priv->mutex);
11217 }
11218
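/* Orderly shutdown of an initialized device: abort any scan, disassociate,
 * shut the LEDs down, give the firmware a short window to settle, then ask
 * the card to disable itself and clear STATUS_INIT. */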
11219 static void ipw_deinit(struct ipw_priv *priv)
11220 {
11221 int i;
11222
11223 if (priv->status & STATUS_SCANNING) {
11224 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11225 ipw_abort_scan(priv);
11226 }
11227
11228 if (priv->status & STATUS_ASSOCIATED) {
11229 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11230 ipw_disassociate(priv);
11231 }
11232
11233 ipw_led_shutdown(priv);
11234
11235 /* Wait up to 1s for status to change to not scanning and not
11236 * associated (disassociation can take a while for a full 802.11
11237 * exchange). */
11238 for (i = 1000; i && (priv->status &
11239 (STATUS_DISASSOCIATING |
11240 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11241 udelay(10);
11242
11243 if (priv->status & (STATUS_DISASSOCIATING |
11244 STATUS_ASSOCIATED | STATUS_SCANNING))
11245 IPW_DEBUG_INFO("Still associated or scanning...\n");
11246 else
11247 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11248
11249 /* Attempt to disable the card */
11250 ipw_send_card_disable(priv, 0);
11251
11252 priv->status &= ~STATUS_INIT;
11253 }
11254
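/* Full take-down used by remove, suspend and the restart path: de-init the
 * device if needed, mask interrupts, clear all status bits except RF-kill
 * (and EXIT_PENDING when unloading), stop the TX queue and power the NIC
 * and radio LEDs off. */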
11255 static void ipw_down(struct ipw_priv *priv)
11256 {
11257 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11258
11259 priv->status |= STATUS_EXIT_PENDING;
11260
11261 if (ipw_is_init(priv))
11262 ipw_deinit(priv);
11263
11264 /* Wipe out the EXIT_PENDING status bit if we are not actually
11265 * exiting the module */
11266 if (!exit_pending)
11267 priv->status &= ~STATUS_EXIT_PENDING;
11268
11269 /* tell the device to stop sending interrupts */
11270 ipw_disable_interrupts(priv);
11271
11272 /* Clear all bits but the RF Kill */
11273 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11274 netif_carrier_off(priv->net_dev);
11275 netif_stop_queue(priv->net_dev);
11276
11277 ipw_stop_nic(priv);
11278
11279 ipw_led_radio_off(priv);
11280 }
11281
11282 static void ipw_bg_down(void *data)
11283 {
11284 struct ipw_priv *priv = data;
11285 mutex_lock(&priv->mutex);
11286 ipw_down(data);
11287 mutex_unlock(&priv->mutex);
11288 }
11289
11290 /* Called by register_netdev() */
11291 static int ipw_net_init(struct net_device *dev)
11292 {
11293 struct ipw_priv *priv = ieee80211_priv(dev);
11294 mutex_lock(&priv->mutex);
11295
11296 if (ipw_up(priv)) {
11297 mutex_unlock(&priv->mutex);
11298 return -EIO;
11299 }
11300
11301 mutex_unlock(&priv->mutex);
11302 return 0;
11303 }
11304
11305 /* PCI driver stuff */
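/* Each entry is { vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data }; the 0x1043 device IDs are matched per subsystem ID, while
 * the 0x104f and 0x422x entries accept any subsystem. */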
11306 static struct pci_device_id card_ids[] = {
11307 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11308 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11309 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11310 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11311 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11312 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11313 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11314 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11315 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11316 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11317 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11318 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11319 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11320 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11321 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11322 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11323 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11324 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11325 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11326 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11327 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11328 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11329
11330 /* required last entry */
11331 {0,}
11332 };
11333
11334 MODULE_DEVICE_TABLE(pci, card_ids);
11335
11336 static struct attribute *ipw_sysfs_entries[] = {
11337 &dev_attr_rf_kill.attr,
11338 &dev_attr_direct_dword.attr,
11339 &dev_attr_indirect_byte.attr,
11340 &dev_attr_indirect_dword.attr,
11341 &dev_attr_mem_gpio_reg.attr,
11342 &dev_attr_command_event_reg.attr,
11343 &dev_attr_nic_type.attr,
11344 &dev_attr_status.attr,
11345 &dev_attr_cfg.attr,
11346 &dev_attr_error.attr,
11347 &dev_attr_event_log.attr,
11348 &dev_attr_cmd_log.attr,
11349 &dev_attr_eeprom_delay.attr,
11350 &dev_attr_ucode_version.attr,
11351 &dev_attr_rtc.attr,
11352 &dev_attr_scan_age.attr,
11353 &dev_attr_led.attr,
11354 &dev_attr_speed_scan.attr,
11355 &dev_attr_net_stats.attr,
11356 #ifdef CONFIG_IPW2200_PROMISCUOUS
11357 &dev_attr_rtap_iface.attr,
11358 &dev_attr_rtap_filter.attr,
11359 #endif
11360 NULL
11361 };
11362
11363 static struct attribute_group ipw_attribute_group = {
11364 .name = NULL, /* put in device directory */
11365 .attrs = ipw_sysfs_entries,
11366 };
11367
11368 #ifdef CONFIG_IPW2200_PROMISCUOUS
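/* Promiscuous "rtap" support: a second, receive-only radiotap interface.
 * Opening it (when the main interface is not already in monitor mode) asks
 * the firmware to accept all data and management frames; closing it
 * restores the normal filters.  Transmitting on it is not supported. */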
11369 static int ipw_prom_open(struct net_device *dev)
11370 {
11371 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11372 struct ipw_priv *priv = prom_priv->priv;
11373
11374 IPW_DEBUG_INFO("prom dev->open\n");
11375 netif_carrier_off(dev);
11376 netif_stop_queue(dev);
11377
11378 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11379 priv->sys_config.accept_all_data_frames = 1;
11380 priv->sys_config.accept_non_directed_frames = 1;
11381 priv->sys_config.accept_all_mgmt_bcpr = 1;
11382 priv->sys_config.accept_all_mgmt_frames = 1;
11383
11384 ipw_send_system_config(priv);
11385 }
11386
11387 return 0;
11388 }
11389
11390 static int ipw_prom_stop(struct net_device *dev)
11391 {
11392 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11393 struct ipw_priv *priv = prom_priv->priv;
11394
11395 IPW_DEBUG_INFO("prom dev->stop\n");
11396
11397 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11398 priv->sys_config.accept_all_data_frames = 0;
11399 priv->sys_config.accept_non_directed_frames = 0;
11400 priv->sys_config.accept_all_mgmt_bcpr = 0;
11401 priv->sys_config.accept_all_mgmt_frames = 0;
11402
11403 ipw_send_system_config(priv);
11404 }
11405
11406 return 0;
11407 }
11408
11409 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11410 {
11411 IPW_DEBUG_INFO("prom dev->xmit\n");
11412 netif_stop_queue(dev);
11413 return -EOPNOTSUPP;
11414 }
11415
11416 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11417 {
11418 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11419 return &prom_priv->ieee->stats;
11420 }
11421
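/* Allocate and register the rtap%d net_device.  It shares the ieee80211
 * private area with a small ipw_prom_priv, is forced into monitor mode and
 * reports ARPHRD_IEEE80211_RADIOTAP so sniffers see radiotap headers. */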
11422 static int ipw_prom_alloc(struct ipw_priv *priv)
11423 {
11424 int rc = 0;
11425
11426 if (priv->prom_net_dev)
11427 return -EPERM;
11428
11429 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11430 if (priv->prom_net_dev == NULL)
11431 return -ENOMEM;
11432
11433 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11434 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11435 priv->prom_priv->priv = priv;
11436
11437 strcpy(priv->prom_net_dev->name, "rtap%d");
11438
11439 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11440 priv->prom_net_dev->open = ipw_prom_open;
11441 priv->prom_net_dev->stop = ipw_prom_stop;
11442 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11443 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11444
11445 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11446
11447 rc = register_netdev(priv->prom_net_dev);
11448 if (rc) {
11449 free_ieee80211(priv->prom_net_dev);
11450 priv->prom_net_dev = NULL;
11451 return rc;
11452 }
11453
11454 return 0;
11455 }
11456
11457 static void ipw_prom_free(struct ipw_priv *priv)
11458 {
11459 if (!priv->prom_net_dev)
11460 return;
11461
11462 unregister_netdev(priv->prom_net_dev);
11463 free_ieee80211(priv->prom_net_dev);
11464
11465 priv->prom_net_dev = NULL;
11466 }
11467
11468 #endif
11469
11470
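/* PCI probe path: allocate the ieee80211 net_device, enable the PCI device
 * with a 32-bit DMA mask, map BAR 0, set up deferred work and the shared
 * IRQ, wire up the net_device/wireless-extensions callbacks, create the
 * sysfs attribute group and finally register the netdev (plus the optional
 * rtap interface).  Errors unwind through the labels at the bottom. */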
11471 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11472 {
11473 int err = 0;
11474 struct net_device *net_dev;
11475 void __iomem *base;
11476 u32 length, val;
11477 struct ipw_priv *priv;
11478 int i;
11479
11480 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11481 if (net_dev == NULL) {
11482 err = -ENOMEM;
11483 goto out;
11484 }
11485
11486 priv = ieee80211_priv(net_dev);
11487 priv->ieee = netdev_priv(net_dev);
11488
11489 priv->net_dev = net_dev;
11490 priv->pci_dev = pdev;
11491 #ifdef CONFIG_IPW2200_DEBUG
11492 ipw_debug_level = debug;
11493 #endif
11494 spin_lock_init(&priv->irq_lock);
11495 spin_lock_init(&priv->lock);
11496 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11497 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11498
11499 mutex_init(&priv->mutex);
11500 if (pci_enable_device(pdev)) {
11501 err = -ENODEV;
11502 goto out_free_ieee80211;
11503 }
11504
11505 pci_set_master(pdev);
11506
11507 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11508 if (!err)
11509 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11510 if (err) {
11511 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11512 goto out_pci_disable_device;
11513 }
11514
11515 pci_set_drvdata(pdev, priv);
11516
11517 err = pci_request_regions(pdev, DRV_NAME);
11518 if (err)
11519 goto out_pci_disable_device;
11520
11521 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11522 * PCI Tx retries from interfering with C3 CPU state */
11523 pci_read_config_dword(pdev, 0x40, &val);
11524 if ((val & 0x0000ff00) != 0)
11525 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11526
11527 length = pci_resource_len(pdev, 0);
11528 priv->hw_len = length;
11529
11530 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11531 if (!base) {
11532 err = -ENODEV;
11533 goto out_pci_release_regions;
11534 }
11535
11536 priv->hw_base = base;
11537 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11538 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11539
11540 err = ipw_setup_deferred_work(priv);
11541 if (err) {
11542 IPW_ERROR("Unable to setup deferred work\n");
11543 goto out_iounmap;
11544 }
11545
11546 ipw_sw_reset(priv, 1);
11547
11548 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11549 if (err) {
11550 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11551 goto out_destroy_workqueue;
11552 }
11553
11554 SET_MODULE_OWNER(net_dev);
11555 SET_NETDEV_DEV(net_dev, &pdev->dev);
11556
11557 mutex_lock(&priv->mutex);
11558
11559 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11560 priv->ieee->set_security = shim__set_security;
11561 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11562
11563 #ifdef CONFIG_IPW2200_QOS
11564 priv->ieee->is_qos_active = ipw_is_qos_active;
11565 priv->ieee->handle_probe_response = ipw_handle_beacon;
11566 priv->ieee->handle_beacon = ipw_handle_probe_response;
11567 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11568 #endif /* CONFIG_IPW2200_QOS */
11569
11570 priv->ieee->perfect_rssi = -20;
11571 priv->ieee->worst_rssi = -85;
11572
11573 net_dev->open = ipw_net_open;
11574 net_dev->stop = ipw_net_stop;
11575 net_dev->init = ipw_net_init;
11576 net_dev->get_stats = ipw_net_get_stats;
11577 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11578 net_dev->set_mac_address = ipw_net_set_mac_address;
11579 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11580 net_dev->wireless_data = &priv->wireless_data;
11581 net_dev->wireless_handlers = &ipw_wx_handler_def;
11582 net_dev->ethtool_ops = &ipw_ethtool_ops;
11583 net_dev->irq = pdev->irq;
11584 net_dev->base_addr = (unsigned long)priv->hw_base;
11585 net_dev->mem_start = pci_resource_start(pdev, 0);
11586 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11587
11588 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11589 if (err) {
11590 IPW_ERROR("failed to create sysfs device attributes\n");
11591 mutex_unlock(&priv->mutex);
11592 goto out_release_irq;
11593 }
11594
11595 mutex_unlock(&priv->mutex);
11596 err = register_netdev(net_dev);
11597 if (err) {
11598 IPW_ERROR("failed to register network device\n");
11599 goto out_remove_sysfs;
11600 }
11601
11602 #ifdef CONFIG_IPW2200_PROMISCUOUS
11603 if (rtap_iface) {
11604 err = ipw_prom_alloc(priv);
11605 if (err) {
11606 IPW_ERROR("Failed to register promiscuous network "
11607 "device (error %d).\n", err);
11608 unregister_netdev(priv->net_dev);
11609 goto out_remove_sysfs;
11610 }
11611 }
11612 #endif
11613
11614 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11615 "channels, %d 802.11a channels)\n",
11616 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11617 priv->ieee->geo.a_channels);
11618
11619 return 0;
11620
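/* Error unwinding: each label undoes the steps above it, in reverse
 * order of setup. */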
11621 out_remove_sysfs:
11622 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11623 out_release_irq:
11624 free_irq(pdev->irq, priv);
11625 out_destroy_workqueue:
11626 destroy_workqueue(priv->workqueue);
11627 priv->workqueue = NULL;
11628 out_iounmap:
11629 iounmap(priv->hw_base);
11630 out_pci_release_regions:
11631 pci_release_regions(pdev);
11632 out_pci_disable_device:
11633 pci_disable_device(pdev);
11634 pci_set_drvdata(pdev, NULL);
11635 out_free_ieee80211:
11636 free_ieee80211(priv->net_dev);
11637 out:
11638 return err;
11639 }
11640
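/* Device removal: bring the interface down, drop the sysfs group,
 * unregister the netdev, free the RX/TX queues and the command log, cancel
 * the delayed work and destroy the workqueue, free the IBSS MAC hash lists,
 * IRQ, MMIO mapping and PCI resources, and finally drop the cached
 * firmware images. */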
11641 static void ipw_pci_remove(struct pci_dev *pdev)
11642 {
11643 struct ipw_priv *priv = pci_get_drvdata(pdev);
11644 struct list_head *p, *q;
11645 int i;
11646
11647 if (!priv)
11648 return;
11649
11650 mutex_lock(&priv->mutex);
11651
11652 priv->status |= STATUS_EXIT_PENDING;
11653 ipw_down(priv);
11654 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11655
11656 mutex_unlock(&priv->mutex);
11657
11658 unregister_netdev(priv->net_dev);
11659
11660 if (priv->rxq) {
11661 ipw_rx_queue_free(priv, priv->rxq);
11662 priv->rxq = NULL;
11663 }
11664 ipw_tx_queue_free(priv);
11665
11666 if (priv->cmdlog) {
11667 kfree(priv->cmdlog);
11668 priv->cmdlog = NULL;
11669 }
11670 /* ipw_down ensures there is no more pending work in the workqueue,
11671 * so we can safely cancel the delayed work and destroy it now. */
11672 cancel_delayed_work(&priv->adhoc_check);
11673 cancel_delayed_work(&priv->gather_stats);
11674 cancel_delayed_work(&priv->request_scan);
11675 cancel_delayed_work(&priv->rf_kill);
11676 cancel_delayed_work(&priv->scan_check);
11677 destroy_workqueue(priv->workqueue);
11678 priv->workqueue = NULL;
11679
11680 /* Free MAC hash list for ADHOC */
11681 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11682 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11683 list_del(p);
11684 kfree(list_entry(p, struct ipw_ibss_seq, list));
11685 }
11686 }
11687
11688 kfree(priv->error);
11689 priv->error = NULL;
11690
11691 #ifdef CONFIG_IPW2200_PROMISCUOUS
11692 ipw_prom_free(priv);
11693 #endif
11694
11695 free_irq(pdev->irq, priv);
11696 iounmap(priv->hw_base);
11697 pci_release_regions(pdev);
11698 pci_disable_device(pdev);
11699 pci_set_drvdata(pdev, NULL);
11700 free_ieee80211(priv->net_dev);
11701 free_firmware();
11702 }
11703
11704 #ifdef CONFIG_PM
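/* Power management: suspend brings the device fully down and powers off the
 * PCI function; resume restores PCI state, re-applies the RETRY_TIMEOUT
 * (0x41) quirk that pci_restore_state() does not cover, and schedules the
 * priv->up work item to re-initialize the hardware. */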
11705 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11706 {
11707 struct ipw_priv *priv = pci_get_drvdata(pdev);
11708 struct net_device *dev = priv->net_dev;
11709
11710 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11711
11712 /* Take down the device; powers it off, etc. */
11713 ipw_down(priv);
11714
11715 /* Remove the PRESENT state of the device */
11716 netif_device_detach(dev);
11717
11718 pci_save_state(pdev);
11719 pci_disable_device(pdev);
11720 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11721
11722 return 0;
11723 }
11724
11725 static int ipw_pci_resume(struct pci_dev *pdev)
11726 {
11727 struct ipw_priv *priv = pci_get_drvdata(pdev);
11728 struct net_device *dev = priv->net_dev;
11729 u32 val;
11730
11731 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11732
11733 pci_set_power_state(pdev, PCI_D0);
11734 pci_enable_device(pdev);
11735 pci_restore_state(pdev);
11736
11737 /*
11738 * Suspend/Resume resets the PCI configuration space, so we have to
11739 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11740 * from interfering with C3 CPU state. pci_restore_state won't help
11741 * here since it only restores the first 64 bytes pci config header.
11742 */
11743 pci_read_config_dword(pdev, 0x40, &val);
11744 if ((val & 0x0000ff00) != 0)
11745 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11746
11747 /* Set the device back into the PRESENT state; this will also wake
11748 * the queue if needed. */
11749 netif_device_attach(dev);
11750
11751 /* Bring the device back up */
11752 queue_work(priv->workqueue, &priv->up);
11753
11754 return 0;
11755 }
11756 #endif
11757
11758 /* driver initialization stuff */
11759 static struct pci_driver ipw_driver = {
11760 .name = DRV_NAME,
11761 .id_table = card_ids,
11762 .probe = ipw_pci_probe,
11763 .remove = __devexit_p(ipw_pci_remove),
11764 #ifdef CONFIG_PM
11765 .suspend = ipw_pci_suspend,
11766 .resume = ipw_pci_resume,
11767 #endif
11768 };
11769
11770 static int __init ipw_init(void)
11771 {
11772 int ret;
11773
11774 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11775 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11776
11777 ret = pci_module_init(&ipw_driver);
11778 if (ret) {
11779 IPW_ERROR("Unable to initialize PCI module\n");
11780 return ret;
11781 }
11782
11783 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11784 if (ret) {
11785 IPW_ERROR("Unable to create driver sysfs file\n");
11786 pci_unregister_driver(&ipw_driver);
11787 return ret;
11788 }
11789
11790 return ret;
11791 }
11792
11793 static void __exit ipw_exit(void)
11794 {
11795 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11796 pci_unregister_driver(&ipw_driver);
11797 }
11798
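/* Module parameters.  The 0444 permission makes each parameter visible
 * (read-only) under /sys/module/ipw2200/parameters/ in addition to being
 * settable at load time, for example (illustrative, assuming the monitor
 * and promiscuous options are built in):
 *
 *   modprobe ipw2200 mode=2 rtap_iface=1 debug=<bitmask>
 *
 * The debug value is only honored when CONFIG_IPW2200_DEBUG is enabled. */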
11799 module_param(disable, int, 0444);
11800 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11801
11802 module_param(associate, int, 0444);
11803 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11804
11805 module_param(auto_create, int, 0444);
11806 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11807
11808 module_param(led, int, 0444);
11809 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11810
11811 #ifdef CONFIG_IPW2200_DEBUG
11812 module_param(debug, int, 0444);
11813 MODULE_PARM_DESC(debug, "debug output mask");
11814 #endif
11815
11816 module_param(channel, int, 0444);
11817 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
11818
11819 #ifdef CONFIG_IPW2200_PROMISCUOUS
11820 module_param(rtap_iface, int, 0444);
11821 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 = create, default 0)");
11822 #endif
11823
11824 #ifdef CONFIG_IPW2200_QOS
11825 module_param(qos_enable, int, 0444);
11826 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11827
11828 module_param(qos_burst_enable, int, 0444);
11829 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11830
11831 module_param(qos_no_ack_mask, int, 0444);
11832 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11833
11834 module_param(burst_duration_CCK, int, 0444);
11835 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11836
11837 module_param(burst_duration_OFDM, int, 0444);
11838 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11839 #endif /* CONFIG_IPW2200_QOS */
11840
11841 #ifdef CONFIG_IPW2200_MONITOR
11842 module_param(mode, int, 0444);
11843 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11844 #else
11845 module_param(mode, int, 0444);
11846 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11847 #endif
11848
11849 module_param(bt_coexist, int, 0444);
11850 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11851
11852 module_param(hwcrypto, int, 0444);
11853 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11854
11855 module_param(cmdlog, int, 0444);
11856 MODULE_PARM_DESC(cmdlog,
11857 "allocate a ring buffer for logging firmware commands");
11858
11859 module_param(roaming, int, 0444);
11860 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11861
11862 module_param(antenna, int, 0444);
11863 MODULE_PARM_DESC(antenna, "select antenna (0=both [default], 1=Main, 2=slow_diversity [pick the antenna with lower background noise], 3=Aux)");
11864
11865 module_exit(ipw_exit);
11866 module_init(ipw_init);