1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
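
/*
 * For illustration: an in-tree build (KBUILD_EXTMOD not defined) with only
 * CONFIG_IPW2200_DEBUG enabled produces the version string "1.2.2kd", while
 * a build with every option above enabled produces "1.2.2kdmprq".
 */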
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
85 static int cmdlog = 0;
86 static int debug = 0;
87 static int channel = 0;
88 static int mode = 0;
89
90 static u32 ipw_debug_level;
91 static int associate = 1;
92 static int auto_create = 1;
93 static int led = 0;
94 static int disable = 0;
95 static int bt_coexist = 0;
96 static int hwcrypto = 0;
97 static int roaming = 1;
98 static const char ipw_modes[] = {
99 'a', 'b', 'g', '?'
100 };
101 static int antenna = CFG_SYS_ANTENNA_BOTH;
102
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105 #endif
106
107
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
114
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117 QOS_TX3_CW_MIN_OFDM},
118 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119 QOS_TX3_CW_MAX_OFDM},
120 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
124 };
125
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 QOS_TX3_CW_MIN_CCK},
129 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 QOS_TX3_CW_MAX_CCK},
131 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134 QOS_TX3_TXOP_LIMIT_CCK}
135 };
136
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139 DEF_TX3_CW_MIN_OFDM},
140 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141 DEF_TX3_CW_MAX_OFDM},
142 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
146 };
147
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 DEF_TX3_CW_MIN_CCK},
151 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 DEF_TX3_CW_MAX_CCK},
153 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156 DEF_TX3_TXOP_LIMIT_CCK}
157 };
158
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160
161 static int from_priority_to_tx_queue[] = {
162 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
164 };
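
/*
 * For illustration, indexing this map with an 802.1d user priority:
 * priorities 0 and 3 select IPW_TX_QUEUE_1, 1 and 2 select IPW_TX_QUEUE_2,
 * 4 and 5 select IPW_TX_QUEUE_3, and 6 and 7 (the highest priorities under
 * the usual 802.1d interpretation) select IPW_TX_QUEUE_4.
 */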
165
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 *qos_param);
172 #endif /* CONFIG_IPW2200_QOS */
173
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
180
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182 int len, int sync);
183
184 static void ipw_tx_queue_free(struct ipw_priv *);
185
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(struct work_struct *work);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(struct work_struct *work);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
198
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
201 {
202 int out, i, j, l;
203 char c;
204
205 out = snprintf(buf, count, "%08X", ofs);
206
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
214 }
215
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
223
224 out += snprintf(buf + out, count - out, "%c", c);
225 }
226
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
229 }
230
231 return out;
232 }
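
/*
 * For example, dumping the 16 bytes "Hello, world!!!!" at offset 0 yields a
 * line of approximately this shape (two groups of eight hex bytes, followed
 * by their ASCII rendering with non-printable bytes shown as '.'):
 *
 *   00000000 48 65 6C 6C 6F 2C 20 77  6F 72 6C 64 21 21 21 21   Hello, w orld!!!!
 */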
233
234 static void printk_buf(int level, const u8 * data, u32 len)
235 {
236 char line[81];
237 u32 ofs = 0;
238 if (!(ipw_debug_level & level))
239 return;
240
241 while (len) {
242 snprint_line(line, sizeof(line), &data[ofs],
243 min(len, 16U), ofs);
244 printk(KERN_DEBUG "%s\n", line);
245 ofs += 16;
246 len -= min(len, 16U);
247 }
248 }
249
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
251 {
252 size_t out = size;
253 u32 ofs = 0;
254 int total = 0;
255
256 while (size && len) {
257 out = snprint_line(output, size, &data[ofs],
258 min_t(size_t, len, 16U), ofs);
259
260 ofs += 16;
261 output += out;
262 size -= out;
263 len -= min_t(size_t, len, 16U);
264 total += out;
265 }
266 return total;
267 }
268
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280 {
281 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282 __LINE__, (u32) (b), (u32) (c));
283 _ipw_write_reg8(a, b, c);
284 }
285
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289 {
290 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291 __LINE__, (u32) (b), (u32) (c));
292 _ipw_write_reg16(a, b, c);
293 }
294
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298 {
299 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg32(a, b, c);
302 }
303
304 /* 8-bit direct write (low 4K) */
305 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306
307 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
309 #define ipw_write8(ipw, ofs, val) do { \
310 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
311 _ipw_write8(ipw, ofs, val); } while (0)
311
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
314
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) do { \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val); } while (0)
319
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
322
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) do { \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val); } while (0)
327
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333 {
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
336 }
337
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346 {
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
349 }
350
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359 {
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
362 }
363
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
371 {
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373 d);
374 _ipw_read_indirect(a, b, c, d);
375 }
376
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
380 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) do { \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d); } while (0)
386
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
389 {
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
393 }
394
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
397 {
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
400
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
404 }
405
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
408 {
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
411
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
415 }
416
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
419 {
420 u32 word;
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
425 }
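
/*
 * Worked example: an 8-bit read of an address such as 0x00300006 latches the
 * dword-aligned address 0x00300004 into IPW_INDIRECT_ADDR, reads the whole
 * dword from IPW_INDIRECT_DATA, and returns bits 23:16 of it, since
 * (0x00300006 & 0x3) == 2 selects a shift of 16.
 */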
426
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
429 {
430 u32 value;
431
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
433
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 return value;
438 }
439
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
444 {
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
448
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451 if (num <= 0) {
452 return;
453 }
454
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
462 }
463
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474 }
475 }
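
/*
 * Worked example: an 11-byte read starting two bytes past a dword boundary is
 * split into 2 leading bytes read through IPW_INDIRECT_DATA, two full dwords
 * (8 bytes) read through the auto-incrementing IPW_AUTOINC_DATA port, and a
 * single trailing byte read through IPW_INDIRECT_DATA again.
 */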
476
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 int num)
481 {
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
484 u32 i;
485
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
487
488 if (num <= 0) {
489 return;
490 }
491
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 aligned_addr += 4;
499 }
500
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
505
506 /* Write the last dword (or portion) byte by byte */
507 if (unlikely(num)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
511 }
512 }
513
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /* for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 int num)
518 {
519 memcpy_toio((priv->hw_base + addr), buf, num);
520 }
521
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
524 {
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
526 }
527
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
530 {
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
532 }
533
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
535 {
536 if (priv->status & STATUS_INT_ENABLED)
537 return;
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
540 }
541
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
543 {
544 if (!(priv->status & STATUS_INT_ENABLED))
545 return;
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
548 }
549
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
551 {
552 unsigned long flags;
553
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
557 }
558
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
560 {
561 unsigned long flags;
562
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
566 }
567
568 static char *ipw_error_desc(u32 val)
569 {
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
603 }
604 }
605
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
608 {
609 u32 i;
610
611 if (!error) {
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
614 return;
615 }
616
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
620
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
624 error->elem[i].time,
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
631 error->log[i].time,
632 error->log[i].data, error->log[i].event);
633 }
634
635 static inline int ipw_is_init(struct ipw_priv *priv)
636 {
637 return (priv->status & STATUS_INIT) ? 1 : 0;
638 }
639
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
641 {
642 u32 addr, field_info, field_len, field_count, total_len;
643
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
645
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
648 return -EINVAL;
649 }
650
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 return -EINVAL;
655 }
656
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
659 /*
660 * TABLE 0: Direct access to a table of 32 bit values
661 *
662 * This is a very simple table with the data directly
663 * read from the table
664 */
665
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
668
669 /* boundary check */
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer then "
672 "max (%i)\n", ord, priv->table0_len);
673 return -EINVAL;
674 }
675
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
680 return -EINVAL;
681 }
682
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
685
686 *len = sizeof(u32);
687 ord <<= 2;
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 break;
690
691 case IPW_ORD_TABLE_1_MASK:
692 /*
693 * TABLE 1: Indirect access to a table of 32 bit values
694 *
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
697 * also a u32)
698 */
699
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
702
703 /* boundary check */
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
706 return -EINVAL;
707 }
708
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
714 }
715
716 *((u32 *) val) =
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 *len = sizeof(u32);
719 break;
720
721 case IPW_ORD_TABLE_2_MASK:
722 /*
723 * TABLE 2: Indirect access to a table of variable sized values
724 *
725 * This table consists of six values, each containing
726 * - a dword containing the starting offset of the data
727 * - a dword containing the length in the first 16 bits
728 * and the count in the second 16 bits
729 */
730
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
733
734 /* boundary check */
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
737 return -EINVAL;
738 }
739
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
742
743 /* get the second DW of statistics;
744 * two 16-bit words - first is length, second is count */
745 field_info =
746 ipw_read_reg32(priv,
747 priv->table2_addr + (ord << 3) +
748 sizeof(u32));
749
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
752
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
755
756 /* abort if not enough memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
759 *len = total_len;
760 return -EINVAL;
761 }
762
763 *len = total_len;
764 if (!total_len)
765 return 0;
766
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
771 break;
772
773 default:
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
775 return -EINVAL;
776
777 }
778
779 return 0;
780 }
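
/*
 * Typical usage (as in the sysfs handlers further below): pass a buffer and
 * its size, check the return code, and read back *len for the size actually
 * needed/written, e.g.:
 *
 *	u32 len = sizeof(u32), tmp = 0;
 *
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		return 0;
 */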
781
782 static void ipw_init_ordinals(struct ipw_priv *priv)
783 {
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
786
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
789
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
792
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
795
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use first two bytes */
799
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
802
803 }
804
805 static u32 ipw_register_toggle(u32 reg)
806 {
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
815 }
816
817 /*
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that need to be on during start
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
824 *
825 */
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
830 static void ipw_led_link_on(struct ipw_priv *priv)
831 {
832 unsigned long flags;
833 u32 led;
834
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 return;
839
840 spin_lock_irqsave(&priv->lock, flags);
841
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
847
848 led = ipw_register_toggle(led);
849
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
852
853 priv->status |= STATUS_LED_LINK_ON;
854
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
858 &priv->led_link_off,
859 LD_TIME_LINK_ON);
860 }
861
862 spin_unlock_irqrestore(&priv->lock, flags);
863 }
864
865 static void ipw_bg_led_link_on(struct work_struct *work)
866 {
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
872 }
873
874 static void ipw_led_link_off(struct ipw_priv *priv)
875 {
876 unsigned long flags;
877 u32 led;
878
879 /* If configured not to use LEDs, or nic type is 1,
880 * then we don't toggle the LINK led. */
881 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882 return;
883
884 spin_lock_irqsave(&priv->lock, flags);
885
886 if (priv->status & STATUS_LED_LINK_ON) {
887 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 led &= priv->led_association_off;
889 led = ipw_register_toggle(led);
890
891 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 ipw_write_reg32(priv, IPW_EVENT_REG, led);
893
894 IPW_DEBUG_LED("Link LED Off\n");
895
896 priv->status &= ~STATUS_LED_LINK_ON;
897
898 /* If we aren't associated and the radio is on, schedule
899 * turning the LED on (blink while unassociated) */
900 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 !(priv->status & STATUS_ASSOCIATED))
902 queue_delayed_work(priv->workqueue, &priv->led_link_on,
903 LD_TIME_LINK_OFF);
904
905 }
906
907 spin_unlock_irqrestore(&priv->lock, flags);
908 }
909
910 static void ipw_bg_led_link_off(struct work_struct *work)
911 {
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
917 }
918
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
920 {
921 u32 led;
922
923 if (priv->config & CFG_NO_LED)
924 return;
925
926 if (priv->status & STATUS_RF_KILL_MASK)
927 return;
928
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
932
933 led = ipw_register_toggle(led);
934
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
937
938 IPW_DEBUG_LED("Activity LED On\n");
939
940 priv->status |= STATUS_LED_ACT_ON;
941
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 LD_TIME_ACT_ON);
945 } else {
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 LD_TIME_ACT_ON);
950 }
951 }
952
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
955 {
956 unsigned long flags;
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
960 }
961 #endif /* 0 */
962
963 static void ipw_led_activity_off(struct ipw_priv *priv)
964 {
965 unsigned long flags;
966 u32 led;
967
968 if (priv->config & CFG_NO_LED)
969 return;
970
971 spin_lock_irqsave(&priv->lock, flags);
972
973 if (priv->status & STATUS_LED_ACT_ON) {
974 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 led &= priv->led_activity_off;
976
977 led = ipw_register_toggle(led);
978
979 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 ipw_write_reg32(priv, IPW_EVENT_REG, led);
981
982 IPW_DEBUG_LED("Activity LED Off\n");
983
984 priv->status &= ~STATUS_LED_ACT_ON;
985 }
986
987 spin_unlock_irqrestore(&priv->lock, flags);
988 }
989
990 static void ipw_bg_led_activity_off(struct work_struct *work)
991 {
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
997 }
998
999 static void ipw_led_band_on(struct ipw_priv *priv)
1000 {
1001 unsigned long flags;
1002 u32 led;
1003
1004 /* Only nic type 1 supports mode LEDs */
1005 if (priv->config & CFG_NO_LED ||
1006 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007 return;
1008
1009 spin_lock_irqsave(&priv->lock, flags);
1010
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 if (priv->assoc_network->mode == IEEE_A) {
1013 led |= priv->led_ofdm_on;
1014 led &= priv->led_association_off;
1015 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 } else if (priv->assoc_network->mode == IEEE_G) {
1017 led |= priv->led_ofdm_on;
1018 led |= priv->led_association_on;
1019 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020 } else {
1021 led &= priv->led_ofdm_off;
1022 led |= priv->led_association_on;
1023 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1024 }
1025
1026 led = ipw_register_toggle(led);
1027
1028 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1030
1031 spin_unlock_irqrestore(&priv->lock, flags);
1032 }
1033
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1035 {
1036 unsigned long flags;
1037 u32 led;
1038
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 return;
1042
1043 spin_lock_irqsave(&priv->lock, flags);
1044
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1048
1049 led = ipw_register_toggle(led);
1050
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1053
1054 spin_unlock_irqrestore(&priv->lock, flags);
1055 }
1056
1057 static void ipw_led_radio_on(struct ipw_priv *priv)
1058 {
1059 ipw_led_link_on(priv);
1060 }
1061
1062 static void ipw_led_radio_off(struct ipw_priv *priv)
1063 {
1064 ipw_led_activity_off(priv);
1065 ipw_led_link_off(priv);
1066 }
1067
1068 static void ipw_led_link_up(struct ipw_priv *priv)
1069 {
1070 /* Set the Link Led on for all nic types */
1071 ipw_led_link_on(priv);
1072 }
1073
1074 static void ipw_led_link_down(struct ipw_priv *priv)
1075 {
1076 ipw_led_activity_off(priv);
1077 ipw_led_link_off(priv);
1078
1079 if (priv->status & STATUS_RF_KILL_MASK)
1080 ipw_led_radio_off(priv);
1081 }
1082
1083 static void ipw_led_init(struct ipw_priv *priv)
1084 {
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1086
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1090
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1093
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1097
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1105
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1108
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1111 return;
1112
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1117 break;
1118
1119 default:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 priv->nic_type);
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1123 break;
1124 }
1125
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1129 else
1130 ipw_led_link_off(priv);
1131 }
1132 }
1133
1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1135 {
1136 ipw_led_activity_off(priv);
1137 ipw_led_link_off(priv);
1138 ipw_led_band_off(priv);
1139 cancel_delayed_work(&priv->led_link_on);
1140 cancel_delayed_work(&priv->led_link_off);
1141 cancel_delayed_work(&priv->led_act_off);
1142 }
1143
1144 /*
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147 * used for controlling the debug level.
1148 *
1149 * See the level definitions in ipw for details.
1150 */
1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1152 {
1153 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1154 }
1155
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 size_t count)
1158 {
1159 char *p = (char *)buf;
1160 u32 val;
1161
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 p++;
1164 if (p[0] == 'x' || p[0] == 'X')
1165 p++;
1166 val = simple_strtoul(p, &p, 16);
1167 } else
1168 val = simple_strtoul(p, &p, 10);
1169 if (p == buf)
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1172 else
1173 ipw_debug_level = val;
1174
1175 return strnlen(buf, count);
1176 }
1177
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
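
/*
 * Example usage from userspace (path as described in the comment above; the
 * last directory component follows DRV_NAME). Values are accepted in hex,
 * with a leading "0x" or "x", or in decimal:
 *
 *	# cat /sys/bus/pci/drivers/ipw/debug_level
 *	# echo 0x00000FFF > /sys/bus/pci/drivers/ipw/debug_level
 */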
1180
1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1182 {
1183 /* length = 1st dword in log */
1184 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1185 }
1186
1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188 u32 log_len, struct ipw_event *log)
1189 {
1190 u32 base;
1191
1192 if (log_len) {
1193 base = ipw_read32(priv, IPW_EVENT_LOG);
1194 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 (u8 *) log, sizeof(*log) * log_len);
1196 }
1197 }
1198
1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1200 {
1201 struct ipw_fw_error *error;
1202 u32 log_len = ipw_get_event_log_len(priv);
1203 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204 u32 elem_len = ipw_read_reg32(priv, base);
1205
1206 error = kmalloc(sizeof(*error) +
1207 sizeof(*error->elem) * elem_len +
1208 sizeof(*error->log) * log_len, GFP_ATOMIC);
1209 if (!error) {
1210 IPW_ERROR("Memory allocation for firmware error log "
1211 "failed.\n");
1212 return NULL;
1213 }
1214 error->jiffies = jiffies;
1215 error->status = priv->status;
1216 error->config = priv->config;
1217 error->elem_len = elem_len;
1218 error->log_len = log_len;
1219 error->elem = (struct ipw_error_elem *)error->payload;
1220 error->log = (struct ipw_event *)(error->elem + elem_len);
1221
1222 ipw_capture_event_log(priv, log_len, error->log);
1223
1224 if (elem_len)
1225 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 sizeof(*error->elem) * elem_len);
1227
1228 return error;
1229 }
1230
1231 static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1233 {
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 struct ipw_event log[log_len];
1237 u32 len = 0, i;
1238
1239 ipw_capture_event_log(priv, log_len, log);
1240
1241 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1242 for (i = 0; i < log_len; i++)
1243 len += snprintf(buf + len, PAGE_SIZE - len,
1244 "\n%08X%08X%08X",
1245 log[i].time, log[i].event, log[i].data);
1246 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1247 return len;
1248 }
1249
1250 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1251
1252 static ssize_t show_error(struct device *d,
1253 struct device_attribute *attr, char *buf)
1254 {
1255 struct ipw_priv *priv = dev_get_drvdata(d);
1256 u32 len = 0, i;
1257 if (!priv->error)
1258 return 0;
1259 len += snprintf(buf + len, PAGE_SIZE - len,
1260 "%08lX%08X%08X%08X",
1261 priv->error->jiffies,
1262 priv->error->status,
1263 priv->error->config, priv->error->elem_len);
1264 for (i = 0; i < priv->error->elem_len; i++)
1265 len += snprintf(buf + len, PAGE_SIZE - len,
1266 "\n%08X%08X%08X%08X%08X%08X%08X",
1267 priv->error->elem[i].time,
1268 priv->error->elem[i].desc,
1269 priv->error->elem[i].blink1,
1270 priv->error->elem[i].blink2,
1271 priv->error->elem[i].link1,
1272 priv->error->elem[i].link2,
1273 priv->error->elem[i].data);
1274
1275 len += snprintf(buf + len, PAGE_SIZE - len,
1276 "\n%08X", priv->error->log_len);
1277 for (i = 0; i < priv->error->log_len; i++)
1278 len += snprintf(buf + len, PAGE_SIZE - len,
1279 "\n%08X%08X%08X",
1280 priv->error->log[i].time,
1281 priv->error->log[i].event,
1282 priv->error->log[i].data);
1283 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1284 return len;
1285 }
1286
1287 static ssize_t clear_error(struct device *d,
1288 struct device_attribute *attr,
1289 const char *buf, size_t count)
1290 {
1291 struct ipw_priv *priv = dev_get_drvdata(d);
1292
1293 kfree(priv->error);
1294 priv->error = NULL;
1295 return count;
1296 }
1297
1298 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1299
1300 static ssize_t show_cmd_log(struct device *d,
1301 struct device_attribute *attr, char *buf)
1302 {
1303 struct ipw_priv *priv = dev_get_drvdata(d);
1304 u32 len = 0, i;
1305 if (!priv->cmdlog)
1306 return 0;
1307 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1308 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1309 i = (i + 1) % priv->cmdlog_len) {
1310 len +=
1311 snprintf(buf + len, PAGE_SIZE - len,
1312 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1313 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1314 priv->cmdlog[i].cmd.len);
1315 len +=
1316 snprintk_buf(buf + len, PAGE_SIZE - len,
1317 (u8 *) priv->cmdlog[i].cmd.param,
1318 priv->cmdlog[i].cmd.len);
1319 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1320 }
1321 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1322 return len;
1323 }
1324
1325 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1326
1327 #ifdef CONFIG_IPW2200_PROMISCUOUS
1328 static void ipw_prom_free(struct ipw_priv *priv);
1329 static int ipw_prom_alloc(struct ipw_priv *priv);
1330 static ssize_t store_rtap_iface(struct device *d,
1331 struct device_attribute *attr,
1332 const char *buf, size_t count)
1333 {
1334 struct ipw_priv *priv = dev_get_drvdata(d);
1335 int rc = 0;
1336
1337 if (count < 1)
1338 return -EINVAL;
1339
1340 switch (buf[0]) {
1341 case '0':
1342 if (!rtap_iface)
1343 return count;
1344
1345 if (netif_running(priv->prom_net_dev)) {
1346 IPW_WARNING("Interface is up. Cannot unregister.\n");
1347 return count;
1348 }
1349
1350 ipw_prom_free(priv);
1351 rtap_iface = 0;
1352 break;
1353
1354 case '1':
1355 if (rtap_iface)
1356 return count;
1357
1358 rc = ipw_prom_alloc(priv);
1359 if (!rc)
1360 rtap_iface = 1;
1361 break;
1362
1363 default:
1364 return -EINVAL;
1365 }
1366
1367 if (rc) {
1368 IPW_ERROR("Failed to register promiscuous network "
1369 "device (error %d).\n", rc);
1370 }
1371
1372 return count;
1373 }
1374
1375 static ssize_t show_rtap_iface(struct device *d,
1376 struct device_attribute *attr,
1377 char *buf)
1378 {
1379 struct ipw_priv *priv = dev_get_drvdata(d);
1380 if (rtap_iface)
1381 return sprintf(buf, "%s", priv->prom_net_dev->name);
1382 else {
1383 buf[0] = '-';
1384 buf[1] = '1';
1385 buf[2] = '\0';
1386 return 3;
1387 }
1388 }
1389
1390 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1391 store_rtap_iface);
1392
1393 static ssize_t store_rtap_filter(struct device *d,
1394 struct device_attribute *attr,
1395 const char *buf, size_t count)
1396 {
1397 struct ipw_priv *priv = dev_get_drvdata(d);
1398
1399 if (!priv->prom_priv) {
1400 IPW_ERROR("Attempting to set filter without "
1401 "rtap_iface enabled.\n");
1402 return -EPERM;
1403 }
1404
1405 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1406
1407 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1408 BIT_ARG16(priv->prom_priv->filter));
1409
1410 return count;
1411 }
1412
1413 static ssize_t show_rtap_filter(struct device *d,
1414 struct device_attribute *attr,
1415 char *buf)
1416 {
1417 struct ipw_priv *priv = dev_get_drvdata(d);
1418 return sprintf(buf, "0x%04X",
1419 priv->prom_priv ? priv->prom_priv->filter : 0);
1420 }
1421
1422 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1423 store_rtap_filter);
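
/*
 * Example usage from userspace (attributes live in the PCI device's sysfs
 * directory): create the radiotap interface at run time and set a capture
 * filter; the filter is parsed by simple_strtol(), so hex values work:
 *
 *	# echo 1 > rtap_iface
 *	# cat rtap_iface            (prints the new interface name, or -1)
 *	# echo 0x0000 > rtap_filter
 */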
1424 #endif
1425
1426 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1427 char *buf)
1428 {
1429 struct ipw_priv *priv = dev_get_drvdata(d);
1430 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1431 }
1432
1433 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1434 const char *buf, size_t count)
1435 {
1436 struct ipw_priv *priv = dev_get_drvdata(d);
1437 struct net_device *dev = priv->net_dev;
1438 char buffer[] = "00000000";
1439 unsigned long len =
1440 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1441 unsigned long val;
1442 char *p = buffer;
1443
1444 IPW_DEBUG_INFO("enter\n");
1445
1446 strncpy(buffer, buf, len);
1447 buffer[len] = 0;
1448
1449 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1450 p++;
1451 if (p[0] == 'x' || p[0] == 'X')
1452 p++;
1453 val = simple_strtoul(p, &p, 16);
1454 } else
1455 val = simple_strtoul(p, &p, 10);
1456 if (p == buffer) {
1457 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1458 } else {
1459 priv->ieee->scan_age = val;
1460 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1461 }
1462
1463 IPW_DEBUG_INFO("exit\n");
1464 return len;
1465 }
1466
1467 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1468
1469 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1470 char *buf)
1471 {
1472 struct ipw_priv *priv = dev_get_drvdata(d);
1473 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1474 }
1475
1476 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1477 const char *buf, size_t count)
1478 {
1479 struct ipw_priv *priv = dev_get_drvdata(d);
1480
1481 IPW_DEBUG_INFO("enter\n");
1482
1483 if (count == 0)
1484 return 0;
1485
1486 if (*buf == 0) {
1487 IPW_DEBUG_LED("Disabling LED control.\n");
1488 priv->config |= CFG_NO_LED;
1489 ipw_led_shutdown(priv);
1490 } else {
1491 IPW_DEBUG_LED("Enabling LED control.\n");
1492 priv->config &= ~CFG_NO_LED;
1493 ipw_led_init(priv);
1494 }
1495
1496 IPW_DEBUG_INFO("exit\n");
1497 return count;
1498 }
1499
1500 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1501
1502 static ssize_t show_status(struct device *d,
1503 struct device_attribute *attr, char *buf)
1504 {
1505 struct ipw_priv *p = d->driver_data;
1506 return sprintf(buf, "0x%08x\n", (int)p->status);
1507 }
1508
1509 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1510
1511 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1512 char *buf)
1513 {
1514 struct ipw_priv *p = d->driver_data;
1515 return sprintf(buf, "0x%08x\n", (int)p->config);
1516 }
1517
1518 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1519
1520 static ssize_t show_nic_type(struct device *d,
1521 struct device_attribute *attr, char *buf)
1522 {
1523 struct ipw_priv *priv = d->driver_data;
1524 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1525 }
1526
1527 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1528
1529 static ssize_t show_ucode_version(struct device *d,
1530 struct device_attribute *attr, char *buf)
1531 {
1532 u32 len = sizeof(u32), tmp = 0;
1533 struct ipw_priv *p = d->driver_data;
1534
1535 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1536 return 0;
1537
1538 return sprintf(buf, "0x%08x\n", tmp);
1539 }
1540
1541 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1542
1543 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1544 char *buf)
1545 {
1546 u32 len = sizeof(u32), tmp = 0;
1547 struct ipw_priv *p = d->driver_data;
1548
1549 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1550 return 0;
1551
1552 return sprintf(buf, "0x%08x\n", tmp);
1553 }
1554
1555 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1556
1557 /*
1558 * Add a device attribute to view/control the delay between eeprom
1559 * operations.
1560 */
1561 static ssize_t show_eeprom_delay(struct device *d,
1562 struct device_attribute *attr, char *buf)
1563 {
1564 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1565 return sprintf(buf, "%i\n", n);
1566 }
1567 static ssize_t store_eeprom_delay(struct device *d,
1568 struct device_attribute *attr,
1569 const char *buf, size_t count)
1570 {
1571 struct ipw_priv *p = d->driver_data;
1572 sscanf(buf, "%i", &p->eeprom_delay);
1573 return strnlen(buf, count);
1574 }
1575
1576 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1577 show_eeprom_delay, store_eeprom_delay);
1578
1579 static ssize_t show_command_event_reg(struct device *d,
1580 struct device_attribute *attr, char *buf)
1581 {
1582 u32 reg = 0;
1583 struct ipw_priv *p = d->driver_data;
1584
1585 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1586 return sprintf(buf, "0x%08x\n", reg);
1587 }
1588 static ssize_t store_command_event_reg(struct device *d,
1589 struct device_attribute *attr,
1590 const char *buf, size_t count)
1591 {
1592 u32 reg;
1593 struct ipw_priv *p = d->driver_data;
1594
1595 sscanf(buf, "%x", &reg);
1596 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1597 return strnlen(buf, count);
1598 }
1599
1600 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1601 show_command_event_reg, store_command_event_reg);
1602
1603 static ssize_t show_mem_gpio_reg(struct device *d,
1604 struct device_attribute *attr, char *buf)
1605 {
1606 u32 reg = 0;
1607 struct ipw_priv *p = d->driver_data;
1608
1609 reg = ipw_read_reg32(p, 0x301100);
1610 return sprintf(buf, "0x%08x\n", reg);
1611 }
1612 static ssize_t store_mem_gpio_reg(struct device *d,
1613 struct device_attribute *attr,
1614 const char *buf, size_t count)
1615 {
1616 u32 reg;
1617 struct ipw_priv *p = d->driver_data;
1618
1619 sscanf(buf, "%x", &reg);
1620 ipw_write_reg32(p, 0x301100, reg);
1621 return strnlen(buf, count);
1622 }
1623
1624 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1625 show_mem_gpio_reg, store_mem_gpio_reg);
1626
1627 static ssize_t show_indirect_dword(struct device *d,
1628 struct device_attribute *attr, char *buf)
1629 {
1630 u32 reg = 0;
1631 struct ipw_priv *priv = d->driver_data;
1632
1633 if (priv->status & STATUS_INDIRECT_DWORD)
1634 reg = ipw_read_reg32(priv, priv->indirect_dword);
1635 else
1636 reg = 0;
1637
1638 return sprintf(buf, "0x%08x\n", reg);
1639 }
1640 static ssize_t store_indirect_dword(struct device *d,
1641 struct device_attribute *attr,
1642 const char *buf, size_t count)
1643 {
1644 struct ipw_priv *priv = d->driver_data;
1645
1646 sscanf(buf, "%x", &priv->indirect_dword);
1647 priv->status |= STATUS_INDIRECT_DWORD;
1648 return strnlen(buf, count);
1649 }
1650
1651 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1652 show_indirect_dword, store_indirect_dword);
1653
1654 static ssize_t show_indirect_byte(struct device *d,
1655 struct device_attribute *attr, char *buf)
1656 {
1657 u8 reg = 0;
1658 struct ipw_priv *priv = d->driver_data;
1659
1660 if (priv->status & STATUS_INDIRECT_BYTE)
1661 reg = ipw_read_reg8(priv, priv->indirect_byte);
1662 else
1663 reg = 0;
1664
1665 return sprintf(buf, "0x%02x\n", reg);
1666 }
1667 static ssize_t store_indirect_byte(struct device *d,
1668 struct device_attribute *attr,
1669 const char *buf, size_t count)
1670 {
1671 struct ipw_priv *priv = d->driver_data;
1672
1673 sscanf(buf, "%x", &priv->indirect_byte);
1674 priv->status |= STATUS_INDIRECT_BYTE;
1675 return strnlen(buf, count);
1676 }
1677
1678 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1679 show_indirect_byte, store_indirect_byte);
1680
1681 static ssize_t show_direct_dword(struct device *d,
1682 struct device_attribute *attr, char *buf)
1683 {
1684 u32 reg = 0;
1685 struct ipw_priv *priv = d->driver_data;
1686
1687 if (priv->status & STATUS_DIRECT_DWORD)
1688 reg = ipw_read32(priv, priv->direct_dword);
1689 else
1690 reg = 0;
1691
1692 return sprintf(buf, "0x%08x\n", reg);
1693 }
1694 static ssize_t store_direct_dword(struct device *d,
1695 struct device_attribute *attr,
1696 const char *buf, size_t count)
1697 {
1698 struct ipw_priv *priv = d->driver_data;
1699
1700 sscanf(buf, "%x", &priv->direct_dword);
1701 priv->status |= STATUS_DIRECT_DWORD;
1702 return strnlen(buf, count);
1703 }
1704
1705 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1706 show_direct_dword, store_direct_dword);
1707
1708 static int rf_kill_active(struct ipw_priv *priv)
1709 {
1710 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1711 priv->status |= STATUS_RF_KILL_HW;
1712 else
1713 priv->status &= ~STATUS_RF_KILL_HW;
1714
1715 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1716 }
1717
1718 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1719 char *buf)
1720 {
1721 /* 0 - RF kill not enabled
1722 1 - SW based RF kill active (sysfs)
1723 2 - HW based RF kill active
1724 3 - Both HW and SW based RF kill active */
1725 struct ipw_priv *priv = d->driver_data;
1726 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1727 (rf_kill_active(priv) ? 0x2 : 0x0);
1728 return sprintf(buf, "%i\n", val);
1729 }
1730
1731 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1732 {
1733 if ((disable_radio ? 1 : 0) ==
1734 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1735 return 0;
1736
1737 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1738 disable_radio ? "OFF" : "ON");
1739
1740 if (disable_radio) {
1741 priv->status |= STATUS_RF_KILL_SW;
1742
1743 if (priv->workqueue)
1744 cancel_delayed_work(&priv->request_scan);
1745 queue_work(priv->workqueue, &priv->down);
1746 } else {
1747 priv->status &= ~STATUS_RF_KILL_SW;
1748 if (rf_kill_active(priv)) {
1749 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1750 "disabled by HW switch\n");
1751 /* Make sure the RF_KILL check timer is running */
1752 cancel_delayed_work(&priv->rf_kill);
1753 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1754 round_jiffies(2 * HZ));
1755 } else
1756 queue_work(priv->workqueue, &priv->up);
1757 }
1758
1759 return 1;
1760 }
1761
1762 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1763 const char *buf, size_t count)
1764 {
1765 struct ipw_priv *priv = d->driver_data;
1766
1767 ipw_radio_kill_sw(priv, buf[0] == '1');
1768
1769 return count;
1770 }
1771
1772 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
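
/*
 * Example usage from userspace (attribute in the PCI device's sysfs
 * directory); reads return the 0-3 encoding described in show_rf_kill():
 *
 *	# echo 1 > rf_kill          (assert the software RF kill switch)
 *	# echo 0 > rf_kill          (release it; the radio only comes back up
 *	                             if the hardware switch is not active)
 *	# cat rf_kill
 */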
1773
1774 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1775 char *buf)
1776 {
1777 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1778 int pos = 0, len = 0;
1779 if (priv->config & CFG_SPEED_SCAN) {
1780 while (priv->speed_scan[pos] != 0)
1781 len += sprintf(&buf[len], "%d ",
1782 priv->speed_scan[pos++]);
1783 return len + sprintf(&buf[len], "\n");
1784 }
1785
1786 return sprintf(buf, "0\n");
1787 }
1788
1789 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1790 const char *buf, size_t count)
1791 {
1792 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793 int channel, pos = 0;
1794 const char *p = buf;
1795
1796 /* list of space separated channels to scan, optionally ending with 0 */
1797 while ((channel = simple_strtol(p, NULL, 0))) {
1798 if (pos == MAX_SPEED_SCAN - 1) {
1799 priv->speed_scan[pos] = 0;
1800 break;
1801 }
1802
1803 if (ieee80211_is_valid_channel(priv->ieee, channel))
1804 priv->speed_scan[pos++] = channel;
1805 else
1806 IPW_WARNING("Skipping invalid channel request: %d\n",
1807 channel);
1808 p = strchr(p, ' ');
1809 if (!p)
1810 break;
1811 while (*p == ' ' || *p == '\t')
1812 p++;
1813 }
1814
1815 if (pos == 0)
1816 priv->config &= ~CFG_SPEED_SCAN;
1817 else {
1818 priv->speed_scan_pos = 0;
1819 priv->config |= CFG_SPEED_SCAN;
1820 }
1821
1822 return count;
1823 }
1824
1825 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1826 store_speed_scan);
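
/*
 * Example usage from userspace: restrict scanning to channels 1, 6 and 11
 * (a space-separated list, optionally terminated by 0), or write "0" to
 * clear the list and return to normal scanning:
 *
 *	# echo "1 6 11" > speed_scan
 *	# echo 0 > speed_scan
 */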
1827
1828 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1829 char *buf)
1830 {
1831 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1832 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1833 }
1834
1835 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1836 const char *buf, size_t count)
1837 {
1838 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1839 if (buf[0] == '1')
1840 priv->config |= CFG_NET_STATS;
1841 else
1842 priv->config &= ~CFG_NET_STATS;
1843
1844 return count;
1845 }
1846
1847 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1848 show_net_stats, store_net_stats);
1849
1850 static ssize_t show_channels(struct device *d,
1851 struct device_attribute *attr,
1852 char *buf)
1853 {
1854 struct ipw_priv *priv = dev_get_drvdata(d);
1855 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1856 int len = 0, i;
1857
1858 len = sprintf(&buf[len],
1859 "Displaying %d channels in 2.4Ghz band "
1860 "(802.11bg):\n", geo->bg_channels);
1861
1862 for (i = 0; i < geo->bg_channels; i++) {
1863 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1864 geo->bg[i].channel,
1865 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1866 " (radar spectrum)" : "",
1867 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1868 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1869 ? "" : ", IBSS",
1870 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1871 "passive only" : "active/passive",
1872 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1873 "B" : "B/G");
1874 }
1875
1876 len += sprintf(&buf[len],
1877 "Displaying %d channels in 5.2Ghz band "
1878 "(802.11a):\n", geo->a_channels);
1879 for (i = 0; i < geo->a_channels; i++) {
1880 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1881 geo->a[i].channel,
1882 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1883 " (radar spectrum)" : "",
1884 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1885 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1886 ? "" : ", IBSS",
1887 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1888 "passive only" : "active/passive");
1889 }
1890
1891 return len;
1892 }
1893
1894 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1895
1896 static void notify_wx_assoc_event(struct ipw_priv *priv)
1897 {
1898 union iwreq_data wrqu;
1899 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1900 if (priv->status & STATUS_ASSOCIATED)
1901 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1902 else
1903 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1904 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1905 }
1906
1907 static void ipw_irq_tasklet(struct ipw_priv *priv)
1908 {
1909 u32 inta, inta_mask, handled = 0;
1910 unsigned long flags;
1911 int rc = 0;
1912
1913 spin_lock_irqsave(&priv->irq_lock, flags);
1914
1915 inta = ipw_read32(priv, IPW_INTA_RW);
1916 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1917 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1918
1919 /* Add any cached INTA values that need to be handled */
1920 inta |= priv->isr_inta;
1921
1922 spin_unlock_irqrestore(&priv->irq_lock, flags);
1923
1924 spin_lock_irqsave(&priv->lock, flags);
1925
1926 /* handle all the justifications for the interrupt */
1927 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1928 ipw_rx(priv);
1929 handled |= IPW_INTA_BIT_RX_TRANSFER;
1930 }
1931
1932 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1933 IPW_DEBUG_HC("Command completed.\n");
1934 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1935 priv->status &= ~STATUS_HCMD_ACTIVE;
1936 wake_up_interruptible(&priv->wait_command_queue);
1937 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1938 }
1939
1940 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1941 IPW_DEBUG_TX("TX_QUEUE_1\n");
1942 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1943 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1944 }
1945
1946 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1947 IPW_DEBUG_TX("TX_QUEUE_2\n");
1948 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1949 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1950 }
1951
1952 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1953 IPW_DEBUG_TX("TX_QUEUE_3\n");
1954 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1955 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1956 }
1957
1958 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1959 IPW_DEBUG_TX("TX_QUEUE_4\n");
1960 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1961 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1962 }
1963
1964 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1965 IPW_WARNING("STATUS_CHANGE\n");
1966 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1967 }
1968
1969 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1970 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1971 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1972 }
1973
1974 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1975 IPW_WARNING("HOST_CMD_DONE\n");
1976 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1977 }
1978
1979 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1980 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1981 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1982 }
1983
1984 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1985 IPW_WARNING("PHY_OFF_DONE\n");
1986 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1987 }
1988
1989 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1990 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1991 priv->status |= STATUS_RF_KILL_HW;
1992 wake_up_interruptible(&priv->wait_command_queue);
1993 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1994 cancel_delayed_work(&priv->request_scan);
1995 schedule_work(&priv->link_down);
1996 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1997 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1998 }
1999
2000 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2001 IPW_WARNING("Firmware error detected. Restarting.\n");
2002 if (priv->error) {
2003 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2004 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2005 struct ipw_fw_error *error =
2006 ipw_alloc_error_log(priv);
2007 ipw_dump_error_log(priv, error);
2008 kfree(error);
2009 }
2010 } else {
2011 priv->error = ipw_alloc_error_log(priv);
2012 if (priv->error)
2013 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2014 else
2015 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2016 "log.\n");
2017 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2018 ipw_dump_error_log(priv, priv->error);
2019 }
2020
2021 /* XXX: If hardware encryption is for WPA/WPA2,
2022 * we have to notify the supplicant. */
2023 if (priv->ieee->sec.encrypt) {
2024 priv->status &= ~STATUS_ASSOCIATED;
2025 notify_wx_assoc_event(priv);
2026 }
2027
2028 /* Keep the restart process from trying to send host
2029 * commands by clearing the INIT status bit */
2030 priv->status &= ~STATUS_INIT;
2031
2032 /* Cancel currently queued command. */
2033 priv->status &= ~STATUS_HCMD_ACTIVE;
2034 wake_up_interruptible(&priv->wait_command_queue);
2035
2036 queue_work(priv->workqueue, &priv->adapter_restart);
2037 handled |= IPW_INTA_BIT_FATAL_ERROR;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2041 IPW_ERROR("Parity error\n");
2042 handled |= IPW_INTA_BIT_PARITY_ERROR;
2043 }
2044
2045 if (handled != inta) {
2046 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2047 }
2048
2049 spin_unlock_irqrestore(&priv->lock, flags);
2050
2051 /* enable all interrupts */
2052 ipw_enable_interrupts(priv);
2053 }
2054
2055 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
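/*
 * For illustration: with the macro above, IPW_CMD(SSID) inside the switch
 * below expands (via token pasting and stringification) to
 *
 *	case IPW_CMD_SSID: return "SSID";
 *
 * so each entry simply maps a host-command opcode to its printable name.
 */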
2056 static char *get_cmd_string(u8 cmd)
2057 {
2058 switch (cmd) {
2059 IPW_CMD(HOST_COMPLETE);
2060 IPW_CMD(POWER_DOWN);
2061 IPW_CMD(SYSTEM_CONFIG);
2062 IPW_CMD(MULTICAST_ADDRESS);
2063 IPW_CMD(SSID);
2064 IPW_CMD(ADAPTER_ADDRESS);
2065 IPW_CMD(PORT_TYPE);
2066 IPW_CMD(RTS_THRESHOLD);
2067 IPW_CMD(FRAG_THRESHOLD);
2068 IPW_CMD(POWER_MODE);
2069 IPW_CMD(WEP_KEY);
2070 IPW_CMD(TGI_TX_KEY);
2071 IPW_CMD(SCAN_REQUEST);
2072 IPW_CMD(SCAN_REQUEST_EXT);
2073 IPW_CMD(ASSOCIATE);
2074 IPW_CMD(SUPPORTED_RATES);
2075 IPW_CMD(SCAN_ABORT);
2076 IPW_CMD(TX_FLUSH);
2077 IPW_CMD(QOS_PARAMETERS);
2078 IPW_CMD(DINO_CONFIG);
2079 IPW_CMD(RSN_CAPABILITIES);
2080 IPW_CMD(RX_KEY);
2081 IPW_CMD(CARD_DISABLE);
2082 IPW_CMD(SEED_NUMBER);
2083 IPW_CMD(TX_POWER);
2084 IPW_CMD(COUNTRY_INFO);
2085 IPW_CMD(AIRONET_INFO);
2086 IPW_CMD(AP_TX_POWER);
2087 IPW_CMD(CCKM_INFO);
2088 IPW_CMD(CCX_VER_INFO);
2089 IPW_CMD(SET_CALIBRATION);
2090 IPW_CMD(SENSITIVITY_CALIB);
2091 IPW_CMD(RETRY_LIMIT);
2092 IPW_CMD(IPW_PRE_POWER_DOWN);
2093 IPW_CMD(VAP_BEACON_TEMPLATE);
2094 IPW_CMD(VAP_DTIM_PERIOD);
2095 IPW_CMD(EXT_SUPPORTED_RATES);
2096 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2097 IPW_CMD(VAP_QUIET_INTERVALS);
2098 IPW_CMD(VAP_CHANNEL_SWITCH);
2099 IPW_CMD(VAP_MANDATORY_CHANNELS);
2100 IPW_CMD(VAP_CELL_PWR_LIMIT);
2101 IPW_CMD(VAP_CF_PARAM_SET);
2102 IPW_CMD(VAP_SET_BEACONING_STATE);
2103 IPW_CMD(MEASUREMENT);
2104 IPW_CMD(POWER_CAPABILITY);
2105 IPW_CMD(SUPPORTED_CHANNELS);
2106 IPW_CMD(TPC_REPORT);
2107 IPW_CMD(WME_INFO);
2108 IPW_CMD(PRODUCTION_COMMAND);
2109 default:
2110 return "UNKNOWN";
2111 }
2112 }
2113
2114 #define HOST_COMPLETE_TIMEOUT HZ
2115
2116 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2117 {
2118 int rc = 0;
2119 unsigned long flags;
2120
2121 spin_lock_irqsave(&priv->lock, flags);
2122 if (priv->status & STATUS_HCMD_ACTIVE) {
2123 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2124 get_cmd_string(cmd->cmd));
2125 spin_unlock_irqrestore(&priv->lock, flags);
2126 return -EAGAIN;
2127 }
2128
2129 priv->status |= STATUS_HCMD_ACTIVE;
2130
2131 if (priv->cmdlog) {
2132 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2133 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2134 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2135 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2136 cmd->len);
2137 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2138 }
2139
2140 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2141 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2142 priv->status);
2143
2144 #ifndef DEBUG_CMD_WEP_KEY
2145 if (cmd->cmd == IPW_CMD_WEP_KEY)
2146 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2147 else
2148 #endif
2149 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2150
2151 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2152 if (rc) {
2153 priv->status &= ~STATUS_HCMD_ACTIVE;
2154 IPW_ERROR("Failed to send %s: Reason %d\n",
2155 get_cmd_string(cmd->cmd), rc);
2156 spin_unlock_irqrestore(&priv->lock, flags);
2157 goto exit;
2158 }
2159 spin_unlock_irqrestore(&priv->lock, flags);
2160
2161 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2162 !(priv->
2163 status & STATUS_HCMD_ACTIVE),
2164 HOST_COMPLETE_TIMEOUT);
2165 if (rc == 0) {
2166 spin_lock_irqsave(&priv->lock, flags);
2167 if (priv->status & STATUS_HCMD_ACTIVE) {
2168 IPW_ERROR("Failed to send %s: Command timed out.\n",
2169 get_cmd_string(cmd->cmd));
2170 priv->status &= ~STATUS_HCMD_ACTIVE;
2171 spin_unlock_irqrestore(&priv->lock, flags);
2172 rc = -EIO;
2173 goto exit;
2174 }
2175 spin_unlock_irqrestore(&priv->lock, flags);
2176 } else
2177 rc = 0;
2178
2179 if (priv->status & STATUS_RF_KILL_HW) {
2180 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2181 get_cmd_string(cmd->cmd));
2182 rc = -EIO;
2183 goto exit;
2184 }
2185
2186 exit:
2187 if (priv->cmdlog) {
2188 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2189 priv->cmdlog_pos %= priv->cmdlog_len;
2190 }
2191 return rc;
2192 }
2193
2194 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2195 {
2196 struct host_cmd cmd = {
2197 .cmd = command,
2198 };
2199
2200 return __ipw_send_cmd(priv, &cmd);
2201 }
2202
2203 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2204 void *data)
2205 {
2206 struct host_cmd cmd = {
2207 .cmd = command,
2208 .len = len,
2209 .param = data,
2210 };
2211
2212 return __ipw_send_cmd(priv, &cmd);
2213 }
2214
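/*
 * Note on the wrappers that follow: every ipw_send_* / ipw_set_* helper
 * below funnels into __ipw_send_cmd() above, which serializes commands via
 * the STATUS_HCMD_ACTIVE bit and then sleeps for up to HOST_COMPLETE_TIMEOUT
 * waiting for the TX_CMD_QUEUE interrupt path to clear that bit.
 */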
2215 static int ipw_send_host_complete(struct ipw_priv *priv)
2216 {
2217 if (!priv) {
2218 IPW_ERROR("Invalid args\n");
2219 return -1;
2220 }
2221
2222 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2223 }
2224
2225 static int ipw_send_system_config(struct ipw_priv *priv)
2226 {
2227 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2228 sizeof(priv->sys_config),
2229 &priv->sys_config);
2230 }
2231
2232 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2233 {
2234 if (!priv || !ssid) {
2235 IPW_ERROR("Invalid args\n");
2236 return -1;
2237 }
2238
2239 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2240 ssid);
2241 }
2242
2243 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2244 {
2245 if (!priv || !mac) {
2246 IPW_ERROR("Invalid args\n");
2247 return -1;
2248 }
2249
2250 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2251 priv->net_dev->name, MAC_ARG(mac));
2252
2253 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2254 }
2255
2256 /*
2257 * NOTE: This must be executed from our workqueue as it results in udelay
2258 * being called which may corrupt the keyboard if executed on default
2259 * workqueue
2260 */
2261 static void ipw_adapter_restart(void *adapter)
2262 {
2263 struct ipw_priv *priv = adapter;
2264
2265 if (priv->status & STATUS_RF_KILL_MASK)
2266 return;
2267
2268 ipw_down(priv);
2269
2270 if (priv->assoc_network &&
2271 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2272 ipw_remove_current_network(priv);
2273
2274 if (ipw_up(priv)) {
2275 IPW_ERROR("Failed to up device\n");
2276 return;
2277 }
2278 }
2279
2280 static void ipw_bg_adapter_restart(struct work_struct *work)
2281 {
2282 struct ipw_priv *priv =
2283 container_of(work, struct ipw_priv, adapter_restart);
2284 mutex_lock(&priv->mutex);
2285 ipw_adapter_restart(priv);
2286 mutex_unlock(&priv->mutex);
2287 }
2288
2289 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2290
2291 static void ipw_scan_check(void *data)
2292 {
2293 struct ipw_priv *priv = data;
2294 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2295 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2296 "adapter after (%dms).\n",
2297 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2298 queue_work(priv->workqueue, &priv->adapter_restart);
2299 }
2300 }
2301
2302 static void ipw_bg_scan_check(struct work_struct *work)
2303 {
2304 struct ipw_priv *priv =
2305 container_of(work, struct ipw_priv, scan_check.work);
2306 mutex_lock(&priv->mutex);
2307 ipw_scan_check(priv);
2308 mutex_unlock(&priv->mutex);
2309 }
2310
2311 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2312 struct ipw_scan_request_ext *request)
2313 {
2314 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2315 sizeof(*request), request);
2316 }
2317
2318 static int ipw_send_scan_abort(struct ipw_priv *priv)
2319 {
2320 if (!priv) {
2321 IPW_ERROR("Invalid args\n");
2322 return -1;
2323 }
2324
2325 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2326 }
2327
2328 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2329 {
2330 struct ipw_sensitivity_calib calib = {
2331 .beacon_rssi_raw = cpu_to_le16(sens),
2332 };
2333
2334 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2335 &calib);
2336 }
2337
2338 static int ipw_send_associate(struct ipw_priv *priv,
2339 struct ipw_associate *associate)
2340 {
2341 struct ipw_associate tmp_associate;
2342
2343 if (!priv || !associate) {
2344 IPW_ERROR("Invalid args\n");
2345 return -1;
2346 }
2347
2348 memcpy(&tmp_associate, associate, sizeof(*associate));
2349 tmp_associate.policy_support =
2350 cpu_to_le16(tmp_associate.policy_support);
2351 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2352 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2353 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2354 tmp_associate.listen_interval =
2355 cpu_to_le16(tmp_associate.listen_interval);
2356 tmp_associate.beacon_interval =
2357 cpu_to_le16(tmp_associate.beacon_interval);
2358 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2359
2360 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2361 &tmp_associate);
2362 }
2363
2364 static int ipw_send_supported_rates(struct ipw_priv *priv,
2365 struct ipw_supported_rates *rates)
2366 {
2367 if (!priv || !rates) {
2368 IPW_ERROR("Invalid args\n");
2369 return -1;
2370 }
2371
2372 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2373 rates);
2374 }
2375
2376 static int ipw_set_random_seed(struct ipw_priv *priv)
2377 {
2378 u32 val;
2379
2380 if (!priv) {
2381 IPW_ERROR("Invalid args\n");
2382 return -1;
2383 }
2384
2385 get_random_bytes(&val, sizeof(val));
2386
2387 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2388 }
2389
2390 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2391 {
2392 if (!priv) {
2393 IPW_ERROR("Invalid args\n");
2394 return -1;
2395 }
2396
2397 phy_off = cpu_to_le32(phy_off);
2398 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2399 &phy_off);
2400 }
2401
2402 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2403 {
2404 if (!priv || !power) {
2405 IPW_ERROR("Invalid args\n");
2406 return -1;
2407 }
2408
2409 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2410 }
2411
2412 static int ipw_set_tx_power(struct ipw_priv *priv)
2413 {
2414 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2415 struct ipw_tx_power tx_power;
2416 s8 max_power;
2417 int i;
2418
2419 memset(&tx_power, 0, sizeof(tx_power));
2420
2421 /* configure device for 'G' band */
2422 tx_power.ieee_mode = IPW_G_MODE;
2423 tx_power.num_channels = geo->bg_channels;
2424 for (i = 0; i < geo->bg_channels; i++) {
2425 max_power = geo->bg[i].max_power;
2426 tx_power.channels_tx_power[i].channel_number =
2427 geo->bg[i].channel;
2428 tx_power.channels_tx_power[i].tx_power = max_power ?
2429 min(max_power, priv->tx_power) : priv->tx_power;
2430 }
2431 if (ipw_send_tx_power(priv, &tx_power))
2432 return -EIO;
2433
2434 /* configure device to also handle 'B' band */
2435 tx_power.ieee_mode = IPW_B_MODE;
2436 if (ipw_send_tx_power(priv, &tx_power))
2437 return -EIO;
2438
2439 /* configure device to also handle 'A' band */
2440 if (priv->ieee->abg_true) {
2441 tx_power.ieee_mode = IPW_A_MODE;
2442 tx_power.num_channels = geo->a_channels;
2443 for (i = 0; i < tx_power.num_channels; i++) {
2444 max_power = geo->a[i].max_power;
2445 tx_power.channels_tx_power[i].channel_number =
2446 geo->a[i].channel;
2447 tx_power.channels_tx_power[i].tx_power = max_power ?
2448 min(max_power, priv->tx_power) : priv->tx_power;
2449 }
2450 if (ipw_send_tx_power(priv, &tx_power))
2451 return -EIO;
2452 }
2453 return 0;
2454 }
2455
2456 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2457 {
2458 struct ipw_rts_threshold rts_threshold = {
2459 .rts_threshold = cpu_to_le16(rts),
2460 };
2461
2462 if (!priv) {
2463 IPW_ERROR("Invalid args\n");
2464 return -1;
2465 }
2466
2467 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2468 sizeof(rts_threshold), &rts_threshold);
2469 }
2470
2471 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2472 {
2473 struct ipw_frag_threshold frag_threshold = {
2474 .frag_threshold = cpu_to_le16(frag),
2475 };
2476
2477 if (!priv) {
2478 IPW_ERROR("Invalid args\n");
2479 return -1;
2480 }
2481
2482 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2483 sizeof(frag_threshold), &frag_threshold);
2484 }
2485
2486 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2487 {
2488 u32 param;
2489
2490 if (!priv) {
2491 IPW_ERROR("Invalid args\n");
2492 return -1;
2493 }
2494
2495 /* If on battery, set to index 3; if on AC, set to CAM;
2496 * otherwise use the user-specified level */
2497 switch (mode) {
2498 case IPW_POWER_BATTERY:
2499 param = IPW_POWER_INDEX_3;
2500 break;
2501 case IPW_POWER_AC:
2502 param = IPW_POWER_MODE_CAM;
2503 break;
2504 default:
2505 param = mode;
2506 break;
2507 }
2508
2509 param = cpu_to_le32(param);
2510 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2511 &param);
2512 }
2513
2514 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2515 {
2516 struct ipw_retry_limit retry_limit = {
2517 .short_retry_limit = slimit,
2518 .long_retry_limit = llimit
2519 };
2520
2521 if (!priv) {
2522 IPW_ERROR("Invalid args\n");
2523 return -1;
2524 }
2525
2526 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2527 &retry_limit);
2528 }
2529
2530 /*
2531 * The IPW device contains a Microwire compatible EEPROM that stores
2532 * various data like the MAC address. Usually the firmware has exclusive
2533 * access to the eeprom, but during device initialization (before the
2534 * device driver has sent the HostComplete command to the firmware) the
2535 * device driver has read access to the EEPROM by way of indirect addressing
2536 * through a couple of memory mapped registers.
2537 *
2538 * The following is a simplified implementation for pulling data out of
2539 * the eeprom, along with some helper functions to find information in
2540 * the per device private data's copy of the eeprom.
2541 *
2542 * NOTE: To better understand how these functions work (i.e. what is a chip
2543 * select and why do we have to keep driving the eeprom clock?), read
2544 * just about any data sheet for a Microwire compatible EEPROM.
2545 */
2546
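/*
 * Illustrative sketch (inferred from the helpers below, not from a
 * datasheet): a READ transaction, as driven by eeprom_op() and
 * eeprom_read_u16(), looks roughly like
 *
 *	assert CS, then clock out: 1 (start bit), op[1], op[0], addr[7..0]
 *	one dummy clock, then clock in 16 data bits, MSB first
 *	release CS via eeprom_disable_cs()
 *
 * where each "clock" is a pair of eeprom_write_reg() calls toggling
 * EEPROM_BIT_SK while holding EEPROM_BIT_CS (plus EEPROM_BIT_DI for a 1).
 */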
2547 /* write a 32 bit value into the indirect accessor register */
2548 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2549 {
2550 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2551
2552 /* the eeprom requires some time to complete the operation */
2553 udelay(p->eeprom_delay);
2554
2555 return;
2556 }
2557
2558 /* perform a chip select operation */
2559 static void eeprom_cs(struct ipw_priv *priv)
2560 {
2561 eeprom_write_reg(priv, 0);
2562 eeprom_write_reg(priv, EEPROM_BIT_CS);
2563 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565 }
2566
2567 /* release (disable) the chip select */
2568 static void eeprom_disable_cs(struct ipw_priv *priv)
2569 {
2570 eeprom_write_reg(priv, EEPROM_BIT_CS);
2571 eeprom_write_reg(priv, 0);
2572 eeprom_write_reg(priv, EEPROM_BIT_SK);
2573 }
2574
2575 /* push a single bit down to the eeprom */
2576 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2577 {
2578 int d = (bit ? EEPROM_BIT_DI : 0);
2579 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2580 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2581 }
2582
2583 /* push an opcode followed by an address down to the eeprom */
2584 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2585 {
2586 int i;
2587
2588 eeprom_cs(priv);
2589 eeprom_write_bit(priv, 1);
2590 eeprom_write_bit(priv, op & 2);
2591 eeprom_write_bit(priv, op & 1);
2592 for (i = 7; i >= 0; i--) {
2593 eeprom_write_bit(priv, addr & (1 << i));
2594 }
2595 }
2596
2597 /* pull 16 bits off the eeprom, one bit at a time */
2598 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2599 {
2600 int i;
2601 u16 r = 0;
2602
2603 /* Send READ Opcode */
2604 eeprom_op(priv, EEPROM_CMD_READ, addr);
2605
2606 /* Send dummy bit */
2607 eeprom_write_reg(priv, EEPROM_BIT_CS);
2608
2609 /* Read the 16-bit word off the eeprom one bit at a time */
2610 for (i = 0; i < 16; i++) {
2611 u32 data = 0;
2612 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2613 eeprom_write_reg(priv, EEPROM_BIT_CS);
2614 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2615 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2616 }
2617
2618 /* Send another dummy bit */
2619 eeprom_write_reg(priv, 0);
2620 eeprom_disable_cs(priv);
2621
2622 return r;
2623 }
2624
2625 /* helper function for pulling the mac address out of the private */
2626 /* data's copy of the eeprom data */
2627 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2628 {
2629 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2630 }
2631
2632 /*
2633 * Either the device driver (i.e. the host) or the firmware can
2634 * load eeprom data into the designated region in SRAM. If neither
2635 * happens then the FW will shut down with a fatal error.
2636 *
2637 * In order to signal the FW to load the EEPROM itself, the
2638 * EEPROM_LOAD_DISABLE location in shared SRAM needs to be set non-zero.
2639 */
2640 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2641 {
2642 int i;
2643 u16 *eeprom = (u16 *) priv->eeprom;
2644
2645 IPW_DEBUG_TRACE(">>\n");
2646
2647 /* read entire contents of eeprom into private buffer */
2648 for (i = 0; i < 128; i++)
2649 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2650
2651 /*
2652 If the data looks correct, then write it to the EEPROM region
2653 of SRAM. Otherwise let the firmware know to perform the operation
2654 on its own.
2655 */
2656 if (priv->eeprom[EEPROM_VERSION] != 0) {
2657 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2658
2659 /* write the eeprom data to sram */
2660 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2661 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2662
2663 /* Do not load eeprom data on fatal error or suspend */
2664 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2665 } else {
2666 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2667
2668 /* Load eeprom data on fatal error or suspend */
2669 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2670 }
2671
2672 IPW_DEBUG_TRACE("<<\n");
2673 }
2674
2675 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2676 {
2677 count >>= 2;
2678 if (!count)
2679 return;
2680 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2681 while (count--)
2682 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2683 }
2684
2685 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2686 {
2687 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2688 CB_NUMBER_OF_ELEMENTS_SMALL *
2689 sizeof(struct command_block));
2690 }
2691
2692 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2693 { /* start dma engine but no transfers yet */
2694
2695 IPW_DEBUG_FW(">> : \n");
2696
2697 /* Start the dma */
2698 ipw_fw_dma_reset_command_blocks(priv);
2699
2700 /* Write CB base address */
2701 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2702
2703 IPW_DEBUG_FW("<< : \n");
2704 return 0;
2705 }
2706
2707 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2708 {
2709 u32 control = 0;
2710
2711 IPW_DEBUG_FW(">> :\n");
2712
2713 /* set the Stop and Abort bit */
2714 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2715 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2716 priv->sram_desc.last_cb_index = 0;
2717
2718 IPW_DEBUG_FW("<< \n");
2719 }
2720
2721 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2722 struct command_block *cb)
2723 {
2724 u32 address =
2725 IPW_SHARED_SRAM_DMA_CONTROL +
2726 (sizeof(struct command_block) * index);
2727 IPW_DEBUG_FW(">> :\n");
2728
2729 ipw_write_indirect(priv, address, (u8 *) cb,
2730 (int)sizeof(struct command_block));
2731
2732 IPW_DEBUG_FW("<< :\n");
2733 return 0;
2734
2735 }
2736
2737 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2738 {
2739 u32 control = 0;
2740 u32 index = 0;
2741
2742 IPW_DEBUG_FW(">> :\n");
2743
2744 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2745 ipw_fw_dma_write_command_block(priv, index,
2746 &priv->sram_desc.cb_list[index]);
2747
2748 /* Enable the DMA in the CSR register */
2749 ipw_clear_bit(priv, IPW_RESET_REG,
2750 IPW_RESET_REG_MASTER_DISABLED |
2751 IPW_RESET_REG_STOP_MASTER);
2752
2753 /* Set the Start bit. */
2754 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2755 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2756
2757 IPW_DEBUG_FW("<< :\n");
2758 return 0;
2759 }
2760
2761 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2762 {
2763 u32 address;
2764 u32 register_value = 0;
2765 u32 cb_fields_address = 0;
2766
2767 IPW_DEBUG_FW(">> :\n");
2768 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2769 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2770
2771 /* Read the DMA Control register */
2772 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2773 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2774
2775 /* Print the CB values */
2776 cb_fields_address = address;
2777 register_value = ipw_read_reg32(priv, cb_fields_address);
2778 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2779
2780 cb_fields_address += sizeof(u32);
2781 register_value = ipw_read_reg32(priv, cb_fields_address);
2782 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2783
2784 cb_fields_address += sizeof(u32);
2785 register_value = ipw_read_reg32(priv, cb_fields_address);
2786 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2787 register_value);
2788
2789 cb_fields_address += sizeof(u32);
2790 register_value = ipw_read_reg32(priv, cb_fields_address);
2791 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2792
2793 IPW_DEBUG_FW(">> :\n");
2794 }
2795
2796 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2797 {
2798 u32 current_cb_address = 0;
2799 u32 current_cb_index = 0;
2800
2801 IPW_DEBUG_FW("<< :\n");
2802 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2803
2804 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2805 sizeof(struct command_block);
2806
2807 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2808 current_cb_index, current_cb_address);
2809
2810 IPW_DEBUG_FW(">> :\n");
2811 return current_cb_index;
2812
2813 }
2814
2815 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2816 u32 src_address,
2817 u32 dest_address,
2818 u32 length,
2819 int interrupt_enabled, int is_last)
2820 {
2821
2822 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2823 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2824 CB_DEST_SIZE_LONG;
2825 struct command_block *cb;
2826 u32 last_cb_element = 0;
2827
2828 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2829 src_address, dest_address, length);
2830
2831 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2832 return -1;
2833
2834 last_cb_element = priv->sram_desc.last_cb_index;
2835 cb = &priv->sram_desc.cb_list[last_cb_element];
2836 priv->sram_desc.last_cb_index++;
2837
2838 /* Calculate the new CB control word */
2839 if (interrupt_enabled)
2840 control |= CB_INT_ENABLED;
2841
2842 if (is_last)
2843 control |= CB_LAST_VALID;
2844
2845 control |= length;
2846
2847 /* Calculate the CB Element's checksum value */
2848 cb->status = control ^ src_address ^ dest_address;
2849
2850 /* Copy the Source and Destination addresses */
2851 cb->dest_addr = dest_address;
2852 cb->source_addr = src_address;
2853
2854 /* Copy the Control Word last */
2855 cb->control = control;
2856
2857 return 0;
2858 }
2859
2860 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2861 u32 src_phys, u32 dest_address, u32 length)
2862 {
2863 u32 bytes_left = length;
2864 u32 src_offset = 0;
2865 u32 dest_offset = 0;
2866 int status = 0;
2867 IPW_DEBUG_FW(">> \n");
2868 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2869 src_phys, dest_address, length);
2870 while (bytes_left > CB_MAX_LENGTH) {
2871 status = ipw_fw_dma_add_command_block(priv,
2872 src_phys + src_offset,
2873 dest_address +
2874 dest_offset,
2875 CB_MAX_LENGTH, 0, 0);
2876 if (status) {
2877 IPW_DEBUG_FW_INFO(": Failed\n");
2878 return -1;
2879 } else
2880 IPW_DEBUG_FW_INFO(": Added new cb\n");
2881
2882 src_offset += CB_MAX_LENGTH;
2883 dest_offset += CB_MAX_LENGTH;
2884 bytes_left -= CB_MAX_LENGTH;
2885 }
2886
2887 /* add the buffer tail */
2888 if (bytes_left > 0) {
2889 status =
2890 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2891 dest_address + dest_offset,
2892 bytes_left, 0, 0);
2893 if (status) {
2894 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2895 return -1;
2896 } else
2897 IPW_DEBUG_FW_INFO
2898 (": Adding new cb - the buffer tail\n");
2899 }
2900
2901 IPW_DEBUG_FW("<< \n");
2902 return 0;
2903 }
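/*
 * Worked example (numbers hypothetical): with length == 2.5 * CB_MAX_LENGTH,
 * the loop in ipw_fw_dma_add_buffer() adds two command blocks of
 * CB_MAX_LENGTH each and the "buffer tail" branch adds a third of
 * 0.5 * CB_MAX_LENGTH, so the image is DMA'd as three sequential chunks.
 * A length of exactly CB_MAX_LENGTH is handled entirely by the tail branch.
 */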
2904
2905 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2906 {
2907 u32 current_index = 0, previous_index;
2908 u32 watchdog = 0;
2909
2910 IPW_DEBUG_FW(">> : \n");
2911
2912 current_index = ipw_fw_dma_command_block_index(priv);
2913 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2914 (int)priv->sram_desc.last_cb_index);
2915
2916 while (current_index < priv->sram_desc.last_cb_index) {
2917 udelay(50);
2918 previous_index = current_index;
2919 current_index = ipw_fw_dma_command_block_index(priv);
2920
2921 if (previous_index < current_index) {
2922 watchdog = 0;
2923 continue;
2924 }
2925 if (++watchdog > 400) {
2926 IPW_DEBUG_FW_INFO("Timeout\n");
2927 ipw_fw_dma_dump_command_block(priv);
2928 ipw_fw_dma_abort(priv);
2929 return -1;
2930 }
2931 }
2932
2933 ipw_fw_dma_abort(priv);
2934
2935 /*Disable the DMA in the CSR register */
2936 ipw_set_bit(priv, IPW_RESET_REG,
2937 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2938
2939 IPW_DEBUG_FW("<< dmaWaitSync \n");
2940 return 0;
2941 }
2942
2943 static void ipw_remove_current_network(struct ipw_priv *priv)
2944 {
2945 struct list_head *element, *safe;
2946 struct ieee80211_network *network = NULL;
2947 unsigned long flags;
2948
2949 spin_lock_irqsave(&priv->ieee->lock, flags);
2950 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2951 network = list_entry(element, struct ieee80211_network, list);
2952 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2953 list_del(element);
2954 list_add_tail(&network->list,
2955 &priv->ieee->network_free_list);
2956 }
2957 }
2958 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2959 }
2960
2961 /**
2962 * Check that card is still alive.
2963 * Reads the debug register from domain0.
2964 * If the card is present, a pre-defined value should
2965 * be found there.
2966 *
2967 * @param priv
2968 * @return 1 if card is present, 0 otherwise
2969 */
2970 static inline int ipw_alive(struct ipw_priv *priv)
2971 {
2972 return ipw_read32(priv, 0x90) == 0xd55555d5;
2973 }
2974
2975 /* timeout in msec, attempted in 10-msec quanta */
2976 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2977 int timeout)
2978 {
2979 int i = 0;
2980
2981 do {
2982 if ((ipw_read32(priv, addr) & mask) == mask)
2983 return i;
2984 mdelay(10);
2985 i += 10;
2986 } while (i < timeout);
2987
2988 return -ETIME;
2989 }
2990
2991 /* These functions load the firmware and microcode for the operation of
2992 * the ipw hardware. They assume the buffer has all the bits for the
2993 * image and that the caller is handling the memory allocation and cleanup.
2994 */
2995
2996 static int ipw_stop_master(struct ipw_priv *priv)
2997 {
2998 int rc;
2999
3000 IPW_DEBUG_TRACE(">> \n");
3001 /* stop master. typical delay - 0 */
3002 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3003
3004 /* timeout is in msec, polled in 10-msec quanta */
3005 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3006 IPW_RESET_REG_MASTER_DISABLED, 100);
3007 if (rc < 0) {
3008 IPW_ERROR("wait for stop master failed after 100ms\n");
3009 return -1;
3010 }
3011
3012 IPW_DEBUG_INFO("stop master %dms\n", rc);
3013
3014 return rc;
3015 }
3016
3017 static void ipw_arc_release(struct ipw_priv *priv)
3018 {
3019 IPW_DEBUG_TRACE(">> \n");
3020 mdelay(5);
3021
3022 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3023
3024 /* no one knows timing, for safety add some delay */
3025 mdelay(5);
3026 }
3027
3028 struct fw_chunk {
3029 u32 address;
3030 u32 length;
3031 };
3032
3033 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3034 {
3035 int rc = 0, i, addr;
3036 u8 cr = 0;
3037 u16 *image;
3038
3039 image = (u16 *) data;
3040
3041 IPW_DEBUG_TRACE(">> \n");
3042
3043 rc = ipw_stop_master(priv);
3044
3045 if (rc < 0)
3046 return rc;
3047
3048 for (addr = IPW_SHARED_LOWER_BOUND;
3049 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3050 ipw_write32(priv, addr, 0);
3051 }
3052
3053 /* no ucode (yet) */
3054 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3055 /* destroy DMA queues */
3056 /* reset sequence */
3057
3058 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3059 ipw_arc_release(priv);
3060 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3061 mdelay(1);
3062
3063 /* reset PHY */
3064 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3065 mdelay(1);
3066
3067 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3068 mdelay(1);
3069
3070 /* enable ucode store */
3071 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3072 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3073 mdelay(1);
3074
3075 /* write ucode */
3076 /**
3077 * @bug
3078 * Do NOT set the indirect address register once and then
3079 * store data to the indirect data register in a loop.
3080 * It seems very reasonable, but in this case DINO does not
3081 * accept the ucode. It is essential to set the address each time.
3082 */
3083 /* load new ipw uCode */
3084 for (i = 0; i < len / 2; i++)
3085 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3086 cpu_to_le16(image[i]));
3087
3088 /* enable DINO */
3089 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3090 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3091
3092 /* this is where the igx / win driver deviates from the VAP driver. */
3093
3094 /* wait for alive response */
3095 for (i = 0; i < 100; i++) {
3096 /* poll for incoming data */
3097 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3098 if (cr & DINO_RXFIFO_DATA)
3099 break;
3100 mdelay(1);
3101 }
3102
3103 if (cr & DINO_RXFIFO_DATA) {
3104 /* alive_command_response size is NOT a multiple of 4 */
3105 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3106
3107 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3108 response_buffer[i] =
3109 le32_to_cpu(ipw_read_reg32(priv,
3110 IPW_BASEBAND_RX_FIFO_READ));
3111 memcpy(&priv->dino_alive, response_buffer,
3112 sizeof(priv->dino_alive));
3113 if (priv->dino_alive.alive_command == 1
3114 && priv->dino_alive.ucode_valid == 1) {
3115 rc = 0;
3116 IPW_DEBUG_INFO
3117 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3118 "of %02d/%02d/%02d %02d:%02d\n",
3119 priv->dino_alive.software_revision,
3120 priv->dino_alive.software_revision,
3121 priv->dino_alive.device_identifier,
3122 priv->dino_alive.device_identifier,
3123 priv->dino_alive.time_stamp[0],
3124 priv->dino_alive.time_stamp[1],
3125 priv->dino_alive.time_stamp[2],
3126 priv->dino_alive.time_stamp[3],
3127 priv->dino_alive.time_stamp[4]);
3128 } else {
3129 IPW_DEBUG_INFO("Microcode is not alive\n");
3130 rc = -EINVAL;
3131 }
3132 } else {
3133 IPW_DEBUG_INFO("No alive response from DINO\n");
3134 rc = -ETIME;
3135 }
3136
3137 /* disable DINO, otherwise for some reason the
3138 firmware has problems getting the alive resp. */
3139 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3140
3141 return rc;
3142 }
3143
3144 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3145 {
3146 int rc = -1;
3147 int offset = 0;
3148 struct fw_chunk *chunk;
3149 dma_addr_t shared_phys;
3150 u8 *shared_virt;
3151
3152 IPW_DEBUG_TRACE("<< : \n");
3153 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3154
3155 if (!shared_virt)
3156 return -ENOMEM;
3157
3158 memmove(shared_virt, data, len);
3159
3160 /* Start the Dma */
3161 rc = ipw_fw_dma_enable(priv);
3162
3163 if (priv->sram_desc.last_cb_index > 0) {
3164 /* the DMA is already ready; this would be a bug. */
3165 BUG();
3166 goto out;
3167 }
3168
3169 do {
3170 chunk = (struct fw_chunk *)(data + offset);
3171 offset += sizeof(struct fw_chunk);
3172 /* build DMA packet and queue up for sending */
3173 /* DMA to chunk->address the chunk->length bytes from data +
3174 * offset */
3175 /* Dma loading */
3176 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3177 le32_to_cpu(chunk->address),
3178 le32_to_cpu(chunk->length));
3179 if (rc) {
3180 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3181 goto out;
3182 }
3183
3184 offset += le32_to_cpu(chunk->length);
3185 } while (offset < len);
3186
3187 /* Run the DMA and wait for the answer */
3188 rc = ipw_fw_dma_kick(priv);
3189 if (rc) {
3190 IPW_ERROR("dmaKick Failed\n");
3191 goto out;
3192 }
3193
3194 rc = ipw_fw_dma_wait(priv);
3195 if (rc) {
3196 IPW_ERROR("dmaWaitSync Failed\n");
3197 goto out;
3198 }
3199 out:
3200 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3201 return rc;
3202 }
3203
3204 /* stop nic */
3205 static int ipw_stop_nic(struct ipw_priv *priv)
3206 {
3207 int rc = 0;
3208
3209 /* stop */
3210 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3211
3212 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3213 IPW_RESET_REG_MASTER_DISABLED, 500);
3214 if (rc < 0) {
3215 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3216 return rc;
3217 }
3218
3219 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3220
3221 return rc;
3222 }
3223
3224 static void ipw_start_nic(struct ipw_priv *priv)
3225 {
3226 IPW_DEBUG_TRACE(">>\n");
3227
3228 /* prvHwStartNic release ARC */
3229 ipw_clear_bit(priv, IPW_RESET_REG,
3230 IPW_RESET_REG_MASTER_DISABLED |
3231 IPW_RESET_REG_STOP_MASTER |
3232 CBD_RESET_REG_PRINCETON_RESET);
3233
3234 /* enable power management */
3235 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3236 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3237
3238 IPW_DEBUG_TRACE("<<\n");
3239 }
3240
3241 static int ipw_init_nic(struct ipw_priv *priv)
3242 {
3243 int rc;
3244
3245 IPW_DEBUG_TRACE(">>\n");
3246 /* reset */
3247 /*prvHwInitNic */
3248 /* set "initialization complete" bit to move adapter to D0 state */
3249 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3250
3251 /* low-level PLL activation */
3252 ipw_write32(priv, IPW_READ_INT_REGISTER,
3253 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3254
3255 /* wait for clock stabilization */
3256 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3257 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3258 if (rc < 0)
3259 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3260
3261 /* assert SW reset */
3262 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3263
3264 udelay(10);
3265
3266 /* set "initialization complete" bit to move adapter to D0 state */
3267 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3268
3269 IPW_DEBUG_TRACE(">>\n");
3270 return 0;
3271 }
3272
3273 /* Call this function from process context, it will sleep in request_firmware.
3274 * Probe is an ok place to call this from.
3275 */
3276 static int ipw_reset_nic(struct ipw_priv *priv)
3277 {
3278 int rc = 0;
3279 unsigned long flags;
3280
3281 IPW_DEBUG_TRACE(">>\n");
3282
3283 rc = ipw_init_nic(priv);
3284
3285 spin_lock_irqsave(&priv->lock, flags);
3286 /* Clear the 'host command active' bit... */
3287 priv->status &= ~STATUS_HCMD_ACTIVE;
3288 wake_up_interruptible(&priv->wait_command_queue);
3289 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3290 wake_up_interruptible(&priv->wait_state);
3291 spin_unlock_irqrestore(&priv->lock, flags);
3292
3293 IPW_DEBUG_TRACE("<<\n");
3294 return rc;
3295 }
3296
3297
3298 struct ipw_fw {
3299 __le32 ver;
3300 __le32 boot_size;
3301 __le32 ucode_size;
3302 __le32 fw_size;
3303 u8 data[0];
3304 };
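/*
 * Image layout implied by ipw_get_fw() and ipw_load() below: the header
 * above is immediately followed by
 *
 *	data[] = boot image (boot_size bytes)
 *	         || ucode image (ucode_size bytes)
 *	         || runtime fw image (fw_size bytes)
 *
 * ipw_load() computes boot_img, ucode_img and fw_img as offsets into
 * data[] using exactly these three little-endian sizes.
 */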
3305
3306 static int ipw_get_fw(struct ipw_priv *priv,
3307 const struct firmware **raw, const char *name)
3308 {
3309 struct ipw_fw *fw;
3310 int rc;
3311
3312 /* ask firmware_class module to get the boot firmware off disk */
3313 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3314 if (rc < 0) {
3315 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3316 return rc;
3317 }
3318
3319 if ((*raw)->size < sizeof(*fw)) {
3320 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3321 return -EINVAL;
3322 }
3323
3324 fw = (void *)(*raw)->data;
3325
3326 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3327 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3328 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3329 name, (*raw)->size);
3330 return -EINVAL;
3331 }
3332
3333 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3334 name,
3335 le32_to_cpu(fw->ver) >> 16,
3336 le32_to_cpu(fw->ver) & 0xff,
3337 (*raw)->size - sizeof(*fw));
3338 return 0;
3339 }
3340
3341 #define IPW_RX_BUF_SIZE (3000)
3342
3343 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3344 struct ipw_rx_queue *rxq)
3345 {
3346 unsigned long flags;
3347 int i;
3348
3349 spin_lock_irqsave(&rxq->lock, flags);
3350
3351 INIT_LIST_HEAD(&rxq->rx_free);
3352 INIT_LIST_HEAD(&rxq->rx_used);
3353
3354 /* Fill the rx_used queue with _all_ of the Rx buffers */
3355 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3356 /* In the reset function, these buffers may have been allocated
3357 * to an SKB, so we need to unmap and free potential storage */
3358 if (rxq->pool[i].skb != NULL) {
3359 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3360 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3361 dev_kfree_skb(rxq->pool[i].skb);
3362 rxq->pool[i].skb = NULL;
3363 }
3364 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3365 }
3366
3367 /* Set us so that we have processed and used all buffers, but have
3368 * not restocked the Rx queue with fresh buffers */
3369 rxq->read = rxq->write = 0;
3370 rxq->processed = RX_QUEUE_SIZE - 1;
3371 rxq->free_count = 0;
3372 spin_unlock_irqrestore(&rxq->lock, flags);
3373 }
3374
3375 #ifdef CONFIG_PM
3376 static int fw_loaded = 0;
3377 static const struct firmware *raw = NULL;
3378
3379 static void free_firmware(void)
3380 {
3381 if (fw_loaded) {
3382 release_firmware(raw);
3383 raw = NULL;
3384 fw_loaded = 0;
3385 }
3386 }
3387 #else
3388 #define free_firmware() do {} while (0)
3389 #endif
3390
3391 static int ipw_load(struct ipw_priv *priv)
3392 {
3393 #ifndef CONFIG_PM
3394 const struct firmware *raw = NULL;
3395 #endif
3396 struct ipw_fw *fw;
3397 u8 *boot_img, *ucode_img, *fw_img;
3398 u8 *name = NULL;
3399 int rc = 0, retries = 3;
3400
3401 switch (priv->ieee->iw_mode) {
3402 case IW_MODE_ADHOC:
3403 name = "ipw2200-ibss.fw";
3404 break;
3405 #ifdef CONFIG_IPW2200_MONITOR
3406 case IW_MODE_MONITOR:
3407 name = "ipw2200-sniffer.fw";
3408 break;
3409 #endif
3410 case IW_MODE_INFRA:
3411 name = "ipw2200-bss.fw";
3412 break;
3413 }
3414
3415 if (!name) {
3416 rc = -EINVAL;
3417 goto error;
3418 }
3419
3420 #ifdef CONFIG_PM
3421 if (!fw_loaded) {
3422 #endif
3423 rc = ipw_get_fw(priv, &raw, name);
3424 if (rc < 0)
3425 goto error;
3426 #ifdef CONFIG_PM
3427 }
3428 #endif
3429
3430 fw = (void *)raw->data;
3431 boot_img = &fw->data[0];
3432 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3433 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3434 le32_to_cpu(fw->ucode_size)];
3435
3436 if (rc < 0)
3437 goto error;
3438
3439 if (!priv->rxq)
3440 priv->rxq = ipw_rx_queue_alloc(priv);
3441 else
3442 ipw_rx_queue_reset(priv, priv->rxq);
3443 if (!priv->rxq) {
3444 IPW_ERROR("Unable to initialize Rx queue\n");
3445 goto error;
3446 }
3447
3448 retry:
3449 /* Ensure interrupts are disabled */
3450 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3451 priv->status &= ~STATUS_INT_ENABLED;
3452
3453 /* ack pending interrupts */
3454 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3455
3456 ipw_stop_nic(priv);
3457
3458 rc = ipw_reset_nic(priv);
3459 if (rc < 0) {
3460 IPW_ERROR("Unable to reset NIC\n");
3461 goto error;
3462 }
3463
3464 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3465 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3466
3467 /* DMA the initial boot firmware into the device */
3468 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3469 if (rc < 0) {
3470 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3471 goto error;
3472 }
3473
3474 /* kick start the device */
3475 ipw_start_nic(priv);
3476
3477 /* wait for the device to finish its initial startup sequence */
3478 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3479 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3480 if (rc < 0) {
3481 IPW_ERROR("device failed to boot initial fw image\n");
3482 goto error;
3483 }
3484 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3485
3486 /* ack fw init done interrupt */
3487 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3488
3489 /* DMA the ucode into the device */
3490 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3491 if (rc < 0) {
3492 IPW_ERROR("Unable to load ucode: %d\n", rc);
3493 goto error;
3494 }
3495
3496 /* stop nic */
3497 ipw_stop_nic(priv);
3498
3499 /* DMA bss firmware into the device */
3500 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3501 if (rc < 0) {
3502 IPW_ERROR("Unable to load firmware: %d\n", rc);
3503 goto error;
3504 }
3505 #ifdef CONFIG_PM
3506 fw_loaded = 1;
3507 #endif
3508
3509 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3510
3511 rc = ipw_queue_reset(priv);
3512 if (rc < 0) {
3513 IPW_ERROR("Unable to initialize queues\n");
3514 goto error;
3515 }
3516
3517 /* Ensure interrupts are disabled */
3518 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3519 /* ack pending interrupts */
3520 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3521
3522 /* kick start the device */
3523 ipw_start_nic(priv);
3524
3525 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3526 if (retries > 0) {
3527 IPW_WARNING("Parity error. Retrying init.\n");
3528 retries--;
3529 goto retry;
3530 }
3531
3532 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3533 rc = -EIO;
3534 goto error;
3535 }
3536
3537 /* wait for the device */
3538 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3539 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3540 if (rc < 0) {
3541 IPW_ERROR("device failed to start within 500ms\n");
3542 goto error;
3543 }
3544 IPW_DEBUG_INFO("device response after %dms\n", rc);
3545
3546 /* ack fw init done interrupt */
3547 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3548
3549 /* read eeprom data and initialize the eeprom region of sram */
3550 priv->eeprom_delay = 1;
3551 ipw_eeprom_init_sram(priv);
3552
3553 /* enable interrupts */
3554 ipw_enable_interrupts(priv);
3555
3556 /* Ensure our queue has valid packets */
3557 ipw_rx_queue_replenish(priv);
3558
3559 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3560
3561 /* ack pending interrupts */
3562 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3563
3564 #ifndef CONFIG_PM
3565 release_firmware(raw);
3566 #endif
3567 return 0;
3568
3569 error:
3570 if (priv->rxq) {
3571 ipw_rx_queue_free(priv, priv->rxq);
3572 priv->rxq = NULL;
3573 }
3574 ipw_tx_queue_free(priv);
3575 if (raw)
3576 release_firmware(raw);
3577 #ifdef CONFIG_PM
3578 fw_loaded = 0;
3579 raw = NULL;
3580 #endif
3581
3582 return rc;
3583 }
3584
3585 /**
3586 * DMA services
3587 *
3588 * Theory of operation
3589 *
3590 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3591 * Two empty entries are always kept in the buffer to protect from overflow.
3592 *
3593 * For the Tx queues, there are low-mark and high-mark limits. If, after
3594 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3595 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
3596 * space becomes > high mark, the Tx queue is resumed.
3597 *
3598 * The IPW operates with six queues, one receive queue in the device's
3599 * sram, one transmit queue for sending commands to the device firmware,
3600 * and four transmit queues for data.
3601 *
3602 * The four transmit queues allow for performing quality of service (qos)
3603 * transmissions as per the 802.11 protocol. Currently Linux does not
3604 * provide a mechanism to the user for utilizing prioritized queues, so
3605 * we only utilize the first data transmit queue (queue1).
3606 */
3607
3608 /**
3609 * Driver allocates buffers of this size for Rx
3610 */
3611
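/*
 * Worked example (values hypothetical) of the free-space rule implemented
 * by ipw_queue_space(): with n_bd = 64, first_empty = 10 and last_used = 4,
 * s = 4 - 10 = -6, wrapped to -6 + 64 = 58, minus the 2-entry reserve
 * gives 56 usable slots.  When the pointers are equal (queue empty) the
 * formula yields n_bd - 2, preserving the reserve that lets empty and full
 * be told apart.
 */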
3612 static inline int ipw_queue_space(const struct clx2_queue *q)
3613 {
3614 int s = q->last_used - q->first_empty;
3615 if (s <= 0)
3616 s += q->n_bd;
3617 s -= 2; /* keep some reserve to not confuse empty and full situations */
3618 if (s < 0)
3619 s = 0;
3620 return s;
3621 }
3622
3623 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3624 {
3625 return (++index == n_bd) ? 0 : index;
3626 }
3627
3628 /**
3629 * Initialize common DMA queue structure
3630 *
3631 * @param q queue to init
3632 * @param count Number of BD's to allocate. Should be a power of 2
3633 * @param read_register Address for 'read' register
3634 * (not offset within BAR, full address)
3635 * @param write_register Address for 'write' register
3636 * (not offset within BAR, full address)
3637 * @param base_register Address for 'base' register
3638 * (not offset within BAR, full address)
3639 * @param size Address for 'size' register
3640 * (not offset within BAR, full address)
3641 */
3642 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3643 int count, u32 read, u32 write, u32 base, u32 size)
3644 {
3645 q->n_bd = count;
3646
3647 q->low_mark = q->n_bd / 4;
3648 if (q->low_mark < 4)
3649 q->low_mark = 4;
3650
3651 q->high_mark = q->n_bd / 8;
3652 if (q->high_mark < 2)
3653 q->high_mark = 2;
3654
3655 q->first_empty = q->last_used = 0;
3656 q->reg_r = read;
3657 q->reg_w = write;
3658
3659 ipw_write32(priv, base, q->dma_addr);
3660 ipw_write32(priv, size, count);
3661 ipw_write32(priv, read, 0);
3662 ipw_write32(priv, write, 0);
3663
3664 _ipw_read32(priv, 0x90);
3665 }
3666
3667 static int ipw_queue_tx_init(struct ipw_priv *priv,
3668 struct clx2_tx_queue *q,
3669 int count, u32 read, u32 write, u32 base, u32 size)
3670 {
3671 struct pci_dev *dev = priv->pci_dev;
3672
3673 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3674 if (!q->txb) {
3675 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3676 return -ENOMEM;
3677 }
3678
3679 q->bd =
3680 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3681 if (!q->bd) {
3682 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3683 sizeof(q->bd[0]) * count);
3684 kfree(q->txb);
3685 q->txb = NULL;
3686 return -ENOMEM;
3687 }
3688
3689 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3690 return 0;
3691 }
3692
3693 /**
3694 * Free one TFD, the one at index [txq->q.last_used].
3695 * Do NOT advance any indexes
3696 *
3697 * @param dev
3698 * @param txq
3699 */
3700 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3701 struct clx2_tx_queue *txq)
3702 {
3703 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3704 struct pci_dev *dev = priv->pci_dev;
3705 int i;
3706
3707 /* classify bd */
3708 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3709 /* nothing to cleanup after for host commands */
3710 return;
3711
3712 /* sanity check */
3713 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3714 IPW_ERROR("Too many chunks: %i\n",
3715 le32_to_cpu(bd->u.data.num_chunks));
3716 /** @todo issue fatal error, it is quite a serious situation */
3717 return;
3718 }
3719
3720 /* unmap chunks if any */
3721 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3722 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3723 le16_to_cpu(bd->u.data.chunk_len[i]),
3724 PCI_DMA_TODEVICE);
3725 if (txq->txb[txq->q.last_used]) {
3726 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3727 txq->txb[txq->q.last_used] = NULL;
3728 }
3729 }
3730 }
3731
3732 /**
3733 * Deallocate DMA queue.
3734 *
3735 * Empty queue by removing and destroying all BD's.
3736 * Free all buffers.
3737 *
3738 * @param dev
3739 * @param q
3740 */
3741 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3742 {
3743 struct clx2_queue *q = &txq->q;
3744 struct pci_dev *dev = priv->pci_dev;
3745
3746 if (q->n_bd == 0)
3747 return;
3748
3749 /* first, empty all BD's */
3750 for (; q->first_empty != q->last_used;
3751 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3752 ipw_queue_tx_free_tfd(priv, txq);
3753 }
3754
3755 /* free buffers belonging to queue itself */
3756 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3757 q->dma_addr);
3758 kfree(txq->txb);
3759
3760 /* 0 fill whole structure */
3761 memset(txq, 0, sizeof(*txq));
3762 }
3763
3764 /**
3765 * Destroy all DMA queues and structures
3766 *
3767 * @param priv
3768 */
3769 static void ipw_tx_queue_free(struct ipw_priv *priv)
3770 {
3771 /* Tx CMD queue */
3772 ipw_queue_tx_free(priv, &priv->txq_cmd);
3773
3774 /* Tx queues */
3775 ipw_queue_tx_free(priv, &priv->txq[0]);
3776 ipw_queue_tx_free(priv, &priv->txq[1]);
3777 ipw_queue_tx_free(priv, &priv->txq[2]);
3778 ipw_queue_tx_free(priv, &priv->txq[3]);
3779 }
3780
3781 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3782 {
3783 /* First 3 bytes are manufacturer */
3784 bssid[0] = priv->mac_addr[0];
3785 bssid[1] = priv->mac_addr[1];
3786 bssid[2] = priv->mac_addr[2];
3787
3788 /* Last bytes are random */
3789 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3790
3791 bssid[0] &= 0xfe; /* clear multicast bit */
3792 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3793 }
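/*
 * Example (adapter address hypothetical): for a MAC of 00:0e:35:aa:bb:cc
 * the generated IBSS BSSID keeps the 00:0e:35 manufacturer prefix,
 * randomizes the last three octets, clears bit 0 of the first octet
 * (unicast) and sets bit 1 (locally administered), giving something like
 * 02:0e:35:xx:yy:zz.
 */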
3794
3795 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3796 {
3797 struct ipw_station_entry entry;
3798 int i;
3799
3800 for (i = 0; i < priv->num_stations; i++) {
3801 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3802 /* Another node is active in network */
3803 priv->missed_adhoc_beacons = 0;
3804 if (!(priv->config & CFG_STATIC_CHANNEL))
3805 /* when other nodes drop out, we drop out */
3806 priv->config &= ~CFG_ADHOC_PERSIST;
3807
3808 return i;
3809 }
3810 }
3811
3812 if (i == MAX_STATIONS)
3813 return IPW_INVALID_STATION;
3814
3815 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3816
3817 entry.reserved = 0;
3818 entry.support_mode = 0;
3819 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3820 memcpy(priv->stations[i], bssid, ETH_ALEN);
3821 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3822 &entry, sizeof(entry));
3823 priv->num_stations++;
3824
3825 return i;
3826 }
3827
3828 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3829 {
3830 int i;
3831
3832 for (i = 0; i < priv->num_stations; i++)
3833 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3834 return i;
3835
3836 return IPW_INVALID_STATION;
3837 }
3838
3839 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3840 {
3841 int err;
3842
3843 if (priv->status & STATUS_ASSOCIATING) {
3844 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3845 queue_work(priv->workqueue, &priv->disassociate);
3846 return;
3847 }
3848
3849 if (!(priv->status & STATUS_ASSOCIATED)) {
3850 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3851 return;
3852 }
3853
3854 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3855 "on channel %d.\n",
3856 MAC_ARG(priv->assoc_request.bssid),
3857 priv->assoc_request.channel);
3858
3859 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3860 priv->status |= STATUS_DISASSOCIATING;
3861
3862 if (quiet)
3863 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3864 else
3865 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3866
3867 err = ipw_send_associate(priv, &priv->assoc_request);
3868 if (err) {
3869 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3870 "failed.\n");
3871 return;
3872 }
3873
3874 }
3875
3876 static int ipw_disassociate(void *data)
3877 {
3878 struct ipw_priv *priv = data;
3879 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3880 return 0;
3881 ipw_send_disassociate(data, 0);
3882 return 1;
3883 }
3884
3885 static void ipw_bg_disassociate(struct work_struct *work)
3886 {
3887 struct ipw_priv *priv =
3888 container_of(work, struct ipw_priv, disassociate);
3889 mutex_lock(&priv->mutex);
3890 ipw_disassociate(priv);
3891 mutex_unlock(&priv->mutex);
3892 }
3893
3894 static void ipw_system_config(struct work_struct *work)
3895 {
3896 struct ipw_priv *priv =
3897 container_of(work, struct ipw_priv, system_config);
3898
3899 #ifdef CONFIG_IPW2200_PROMISCUOUS
3900 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3901 priv->sys_config.accept_all_data_frames = 1;
3902 priv->sys_config.accept_non_directed_frames = 1;
3903 priv->sys_config.accept_all_mgmt_bcpr = 1;
3904 priv->sys_config.accept_all_mgmt_frames = 1;
3905 }
3906 #endif
3907
3908 ipw_send_system_config(priv);
3909 }
3910
3911 struct ipw_status_code {
3912 u16 status;
3913 const char *reason;
3914 };
3915
3916 static const struct ipw_status_code ipw_status_codes[] = {
3917 {0x00, "Successful"},
3918 {0x01, "Unspecified failure"},
3919 {0x0A, "Cannot support all requested capabilities in the "
3920 "Capability information field"},
3921 {0x0B, "Reassociation denied due to inability to confirm that "
3922 "association exists"},
3923 {0x0C, "Association denied due to reason outside the scope of this "
3924 "standard"},
3925 {0x0D,
3926 "Responding station does not support the specified authentication "
3927 "algorithm"},
3928 {0x0E,
3929 "Received an Authentication frame with authentication sequence "
3930 "transaction sequence number out of expected sequence"},
3931 {0x0F, "Authentication rejected because of challenge failure"},
3932 {0x10, "Authentication rejected due to timeout waiting for next "
3933 "frame in sequence"},
3934 {0x11, "Association denied because AP is unable to handle additional "
3935 "associated stations"},
3936 {0x12,
3937 "Association denied due to requesting station not supporting all "
3938 "of the datarates in the BSSBasicServiceSet Parameter"},
3939 {0x13,
3940 "Association denied due to requesting station not supporting "
3941 "short preamble operation"},
3942 {0x14,
3943 "Association denied due to requesting station not supporting "
3944 "PBCC encoding"},
3945 {0x15,
3946 "Association denied due to requesting station not supporting "
3947 "channel agility"},
3948 {0x19,
3949 "Association denied due to requesting station not supporting "
3950 "short slot operation"},
3951 {0x1A,
3952 "Association denied due to requesting station not supporting "
3953 "DSSS-OFDM operation"},
3954 {0x28, "Invalid Information Element"},
3955 {0x29, "Group Cipher is not valid"},
3956 {0x2A, "Pairwise Cipher is not valid"},
3957 {0x2B, "AKMP is not valid"},
3958 {0x2C, "Unsupported RSN IE version"},
3959 {0x2D, "Invalid RSN IE Capabilities"},
3960 {0x2E, "Cipher suite is rejected per security policy"},
3961 };
3962
3963 static const char *ipw_get_status_code(u16 status)
3964 {
3965 int i;
3966 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3967 if (ipw_status_codes[i].status == (status & 0xff))
3968 return ipw_status_codes[i].reason;
3969 return "Unknown status value.";
3970 }
3971
3972 static inline void average_init(struct average *avg)
3973 {
3974 memset(avg, 0, sizeof(*avg));
3975 }
3976
3977 #define DEPTH_RSSI 8
3978 #define DEPTH_NOISE 16
3979 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3980 {
3981 return ((depth-1)*prev_avg + val)/depth;
3982 }
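/* Worked example (illustrative only): with depth = DEPTH_NOISE (16) each new
 * sample contributes 1/16 of its value, e.g.
 *
 *   prev_avg = -85, val = -69:
 *   ((16 - 1) * -85 + -69) / 16 = (-1275 - 69) / 16 = -84
 *
 * With depth = DEPTH_RSSI (8) the filter reacts twice as fast:
 *   prev_avg = -60, val = -52:  (7 * -60 + -52) / 8 = -59
 *
 * Note the integer division, so very small changes may be rounded away.
 */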
3983
3984 static void average_add(struct average *avg, s16 val)
3985 {
3986 avg->sum -= avg->entries[avg->pos];
3987 avg->sum += val;
3988 avg->entries[avg->pos++] = val;
3989 if (unlikely(avg->pos == AVG_ENTRIES)) {
3990 avg->init = 1;
3991 avg->pos = 0;
3992 }
3993 }
3994
3995 static s16 average_value(struct average *avg)
3996 {
3997 if (unlikely(!avg->init)) {
3998 if (avg->pos)
3999 return avg->sum / avg->pos;
4000 return 0;
4001 }
4002
4003 return avg->sum / AVG_ENTRIES;
4004 }
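/* Illustrative sketch of the windowed average above: avg->entries[] is a
 * circular buffer of AVG_ENTRIES samples and avg->sum tracks their running
 * total. Until the buffer has wrapped once (avg->init == 0) only the samples
 * collected so far are averaged, e.g. after average_add() of 10, 20 and 30
 * with pos == 3 the result is (10 + 20 + 30) / 3 = 20. After the first wrap
 * the full AVG_ENTRIES-sample window is used.
 */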
4005
4006 static void ipw_reset_stats(struct ipw_priv *priv)
4007 {
4008 u32 len = sizeof(u32);
4009
4010 priv->quality = 0;
4011
4012 average_init(&priv->average_missed_beacons);
4013 priv->exp_avg_rssi = -60;
4014 priv->exp_avg_noise = -85 + 0x100;
4015
4016 priv->last_rate = 0;
4017 priv->last_missed_beacons = 0;
4018 priv->last_rx_packets = 0;
4019 priv->last_tx_packets = 0;
4020 priv->last_tx_failures = 0;
4021
4022 /* Firmware managed, reset only when NIC is restarted, so we have to
4023 * normalize on the current value */
4024 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4025 &priv->last_rx_err, &len);
4026 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4027 &priv->last_tx_failures, &len);
4028
4029 /* Driver managed, reset with each association */
4030 priv->missed_adhoc_beacons = 0;
4031 priv->missed_beacons = 0;
4032 priv->tx_packets = 0;
4033 priv->rx_packets = 0;
4034
4035 }
4036
4037 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4038 {
4039 u32 i = 0x80000000;
4040 u32 mask = priv->rates_mask;
4041 /* If currently associated in B mode, restrict the maximum
4042 * rate match to B rates */
4043 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4044 mask &= IEEE80211_CCK_RATES_MASK;
4045
4046 /* TODO: Verify that the rate is supported by the current rates
4047 * list. */
4048
4049 while (i && !(mask & i))
4050 i >>= 1;
4051 switch (i) {
4052 case IEEE80211_CCK_RATE_1MB_MASK:
4053 return 1000000;
4054 case IEEE80211_CCK_RATE_2MB_MASK:
4055 return 2000000;
4056 case IEEE80211_CCK_RATE_5MB_MASK:
4057 return 5500000;
4058 case IEEE80211_OFDM_RATE_6MB_MASK:
4059 return 6000000;
4060 case IEEE80211_OFDM_RATE_9MB_MASK:
4061 return 9000000;
4062 case IEEE80211_CCK_RATE_11MB_MASK:
4063 return 11000000;
4064 case IEEE80211_OFDM_RATE_12MB_MASK:
4065 return 12000000;
4066 case IEEE80211_OFDM_RATE_18MB_MASK:
4067 return 18000000;
4068 case IEEE80211_OFDM_RATE_24MB_MASK:
4069 return 24000000;
4070 case IEEE80211_OFDM_RATE_36MB_MASK:
4071 return 36000000;
4072 case IEEE80211_OFDM_RATE_48MB_MASK:
4073 return 48000000;
4074 case IEEE80211_OFDM_RATE_54MB_MASK:
4075 return 54000000;
4076 }
4077
4078 if (priv->ieee->mode == IEEE_B)
4079 return 11000000;
4080 else
4081 return 54000000;
4082 }
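/* Illustrative example (not part of the driver): the loop above scans
 * rates_mask from bit 31 downwards and the switch maps the highest set bit
 * to bits/second. If, say, only the CCK rates are enabled
 * (rates_mask == IEEE80211_CCK_RATES_MASK), the first set bit found is
 * IEEE80211_CCK_RATE_11MB_MASK and the function returns 11000000.
 */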
4083
4084 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4085 {
4086 u32 rate, len = sizeof(rate);
4087 int err;
4088
4089 if (!(priv->status & STATUS_ASSOCIATED))
4090 return 0;
4091
4092 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4093 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4094 &len);
4095 if (err) {
4096 IPW_DEBUG_INFO("failed querying ordinals.\n");
4097 return 0;
4098 }
4099 } else
4100 return ipw_get_max_rate(priv);
4101
4102 switch (rate) {
4103 case IPW_TX_RATE_1MB:
4104 return 1000000;
4105 case IPW_TX_RATE_2MB:
4106 return 2000000;
4107 case IPW_TX_RATE_5MB:
4108 return 5500000;
4109 case IPW_TX_RATE_6MB:
4110 return 6000000;
4111 case IPW_TX_RATE_9MB:
4112 return 9000000;
4113 case IPW_TX_RATE_11MB:
4114 return 11000000;
4115 case IPW_TX_RATE_12MB:
4116 return 12000000;
4117 case IPW_TX_RATE_18MB:
4118 return 18000000;
4119 case IPW_TX_RATE_24MB:
4120 return 24000000;
4121 case IPW_TX_RATE_36MB:
4122 return 36000000;
4123 case IPW_TX_RATE_48MB:
4124 return 48000000;
4125 case IPW_TX_RATE_54MB:
4126 return 54000000;
4127 }
4128
4129 return 0;
4130 }
4131
4132 #define IPW_STATS_INTERVAL (2 * HZ)
4133 static void ipw_gather_stats(struct ipw_priv *priv)
4134 {
4135 u32 rx_err, rx_err_delta, rx_packets_delta;
4136 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4137 u32 missed_beacons_percent, missed_beacons_delta;
4138 u32 quality = 0;
4139 u32 len = sizeof(u32);
4140 s16 rssi;
4141 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4142 rate_quality;
4143 u32 max_rate;
4144
4145 if (!(priv->status & STATUS_ASSOCIATED)) {
4146 priv->quality = 0;
4147 return;
4148 }
4149
4150 /* Update the statistics */
4151 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4152 &priv->missed_beacons, &len);
4153 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4154 priv->last_missed_beacons = priv->missed_beacons;
4155 if (priv->assoc_request.beacon_interval) {
4156 missed_beacons_percent = missed_beacons_delta *
4157 (HZ * priv->assoc_request.beacon_interval) /
4158 (IPW_STATS_INTERVAL * 10);
4159 } else {
4160 missed_beacons_percent = 0;
4161 }
4162 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4163
4164 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4165 rx_err_delta = rx_err - priv->last_rx_err;
4166 priv->last_rx_err = rx_err;
4167
4168 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4169 tx_failures_delta = tx_failures - priv->last_tx_failures;
4170 priv->last_tx_failures = tx_failures;
4171
4172 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4173 priv->last_rx_packets = priv->rx_packets;
4174
4175 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4176 priv->last_tx_packets = priv->tx_packets;
4177
4178 /* Calculate quality based on the following:
4179 *
4180 * Missed beacon: 100% = 0, 0% = 70% missed
4181 * Rate: 60% = 1Mbs, 100% = Max
4182 * Rx and Tx errors represent a straight % of total Rx/Tx
4183 * RSSI: 100% = > -50, 0% = < -80
4184 * Rx errors: 100% = 0, 0% = 50% missed
4185 *
4186 * The lowest computed quality is used.
4187 *
4188 */
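/* Worked example (illustrative values only): with a 100 TU beacon interval
 * and IPW_STATS_INTERVAL of 2*HZ, the calculation above reduces to roughly
 * missed_beacons_percent = missed_beacons_delta * beacon_interval / 20, so
 * 4 missed beacons in one interval give 4 * 100 / 20 = 20%. That yields
 * beacon_quality = 100 - 20 = 80, rescaled below to
 * (80 - 5) * 100 / 95 = 78%. Similarly, a current rate of 24 Mb/s against a
 * 54 Mb/s maximum gives rate_quality = 24 * 40 / 54 + 60 = 77%.
 */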
4189 #define BEACON_THRESHOLD 5
4190 beacon_quality = 100 - missed_beacons_percent;
4191 if (beacon_quality < BEACON_THRESHOLD)
4192 beacon_quality = 0;
4193 else
4194 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4195 (100 - BEACON_THRESHOLD);
4196 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4197 beacon_quality, missed_beacons_percent);
4198
4199 priv->last_rate = ipw_get_current_rate(priv);
4200 max_rate = ipw_get_max_rate(priv);
4201 rate_quality = priv->last_rate * 40 / max_rate + 60;
4202 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4203 rate_quality, priv->last_rate / 1000000);
4204
4205 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4206 rx_quality = 100 - (rx_err_delta * 100) /
4207 (rx_packets_delta + rx_err_delta);
4208 else
4209 rx_quality = 100;
4210 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4211 rx_quality, rx_err_delta, rx_packets_delta);
4212
4213 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4214 tx_quality = 100 - (tx_failures_delta * 100) /
4215 (tx_packets_delta + tx_failures_delta);
4216 else
4217 tx_quality = 100;
4218 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4219 tx_quality, tx_failures_delta, tx_packets_delta);
4220
4221 rssi = priv->exp_avg_rssi;
4222 signal_quality =
4223 (100 *
4224 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4225 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4226 (priv->ieee->perfect_rssi - rssi) *
4227 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4228 62 * (priv->ieee->perfect_rssi - rssi))) /
4229 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4230 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4231 if (signal_quality > 100)
4232 signal_quality = 100;
4233 else if (signal_quality < 1)
4234 signal_quality = 0;
4235
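/* Worked example (illustrative only; assuming, for illustration,
 * perfect_rssi = -20 and worst_rssi = -85): for rssi = -60, let
 * d = perfect - worst = 65 and p = perfect - rssi = 40. Then
 *   signal_quality = (100 * d * d - p * (15 * d + 62 * p)) / (d * d)
 *                  = (422500 - 40 * (975 + 2480)) / 4225
 *                  = 284300 / 4225 ~= 67%.
 */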
4236 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4237 signal_quality, rssi);
4238
4239 quality = min(beacon_quality,
4240 min(rate_quality,
4241 min(tx_quality, min(rx_quality, signal_quality))));
4242 if (quality == beacon_quality)
4243 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4244 quality);
4245 if (quality == rate_quality)
4246 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4247 quality);
4248 if (quality == tx_quality)
4249 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4250 quality);
4251 if (quality == rx_quality)
4252 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4253 quality);
4254 if (quality == signal_quality)
4255 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4256 quality);
4257
4258 priv->quality = quality;
4259
4260 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4261 IPW_STATS_INTERVAL);
4262 }
4263
4264 static void ipw_bg_gather_stats(struct work_struct *work)
4265 {
4266 struct ipw_priv *priv =
4267 container_of(work, struct ipw_priv, gather_stats.work);
4268 mutex_lock(&priv->mutex);
4269 ipw_gather_stats(priv);
4270 mutex_unlock(&priv->mutex);
4271 }
4272
4273 /* Missed beacon behavior:
4274 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4275 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4276 * Above disassociate threshold, give up and stop scanning.
4277 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
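/* Illustrative walkthrough (assuming, for example only, roaming_threshold = 8
 * and disassociate_threshold = 24 -- the actual values are configurable):
 *   missed_count 1..8  : only logged, no scan or roam is started
 *   missed_count 9..24 : STATUS_ROAMING is set and a scan is requested
 *   missed_count  > 24 : disassociate, clear STATUS_ROAMING, abort scans
 */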
4278 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4279 int missed_count)
4280 {
4281 priv->notif_missed_beacons = missed_count;
4282
4283 if (missed_count > priv->disassociate_threshold &&
4284 priv->status & STATUS_ASSOCIATED) {
4285 /* If associated and we've hit the missed
4286 * beacon threshold, disassociate, turn
4287 * off roaming, and abort any active scans */
4288 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4289 IPW_DL_STATE | IPW_DL_ASSOC,
4290 "Missed beacon: %d - disassociate\n", missed_count);
4291 priv->status &= ~STATUS_ROAMING;
4292 if (priv->status & STATUS_SCANNING) {
4293 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4294 IPW_DL_STATE,
4295 "Aborting scan with missed beacon.\n");
4296 queue_work(priv->workqueue, &priv->abort_scan);
4297 }
4298
4299 queue_work(priv->workqueue, &priv->disassociate);
4300 return;
4301 }
4302
4303 if (priv->status & STATUS_ROAMING) {
4304 /* If we are currently roaming, then just
4305 * print a debug statement... */
4306 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4307 "Missed beacon: %d - roam in progress\n",
4308 missed_count);
4309 return;
4310 }
4311
4312 if (roaming &&
4313 (missed_count > priv->roaming_threshold &&
4314 missed_count <= priv->disassociate_threshold)) {
4315 /* If we are not already roaming, set the ROAM
4316 * bit in the status and kick off a scan.
4317 * This can happen several times before we reach
4318 * disassociate_threshold. */
4319 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4320 "Missed beacon: %d - initiate "
4321 "roaming\n", missed_count);
4322 if (!(priv->status & STATUS_ROAMING)) {
4323 priv->status |= STATUS_ROAMING;
4324 if (!(priv->status & STATUS_SCANNING))
4325 queue_delayed_work(priv->workqueue,
4326 &priv->request_scan, 0);
4327 }
4328 return;
4329 }
4330
4331 if (priv->status & STATUS_SCANNING) {
4332 /* Stop scan to keep fw from getting
4333 * stuck (only if we aren't roaming --
4334 * otherwise we'll never scan more than 2 or 3
4335 * channels..) */
4336 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4337 "Aborting scan with missed beacon.\n");
4338 queue_work(priv->workqueue, &priv->abort_scan);
4339 }
4340
4341 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4342 }
4343
4344 /**
4345 * Handle host notification packet.
4346 * Called from interrupt routine
4347 */
4348 static void ipw_rx_notification(struct ipw_priv *priv,
4349 struct ipw_rx_notification *notif)
4350 {
4351 notif->size = le16_to_cpu(notif->size);
4352
4353 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4354
4355 switch (notif->subtype) {
4356 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4357 struct notif_association *assoc = &notif->u.assoc;
4358
4359 switch (assoc->state) {
4360 case CMAS_ASSOCIATED:{
4361 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4362 IPW_DL_ASSOC,
4363 "associated: '%s' " MAC_FMT
4364 " \n",
4365 escape_essid(priv->essid,
4366 priv->essid_len),
4367 MAC_ARG(priv->bssid));
4368
4369 switch (priv->ieee->iw_mode) {
4370 case IW_MODE_INFRA:
4371 memcpy(priv->ieee->bssid,
4372 priv->bssid, ETH_ALEN);
4373 break;
4374
4375 case IW_MODE_ADHOC:
4376 memcpy(priv->ieee->bssid,
4377 priv->bssid, ETH_ALEN);
4378
4379 /* clear out the station table */
4380 priv->num_stations = 0;
4381
4382 IPW_DEBUG_ASSOC
4383 ("queueing adhoc check\n");
4384 queue_delayed_work(priv->
4385 workqueue,
4386 &priv->
4387 adhoc_check,
4388 priv->
4389 assoc_request.
4390 beacon_interval);
4391 break;
4392 }
4393
4394 priv->status &= ~STATUS_ASSOCIATING;
4395 priv->status |= STATUS_ASSOCIATED;
4396 queue_work(priv->workqueue,
4397 &priv->system_config);
4398
4399 #ifdef CONFIG_IPW2200_QOS
4400 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4401 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4402 if ((priv->status & STATUS_AUTH) &&
4403 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4404 == IEEE80211_STYPE_ASSOC_RESP)) {
4405 if ((sizeof
4406 (struct
4407 ieee80211_assoc_response)
4408 <= notif->size)
4409 && (notif->size <= 2314)) {
4410 struct
4411 ieee80211_rx_stats
4412 stats = {
4413 .len =
4414 notif->
4415 size - 1,
4416 };
4417
4418 IPW_DEBUG_QOS
4419 ("QoS Associate "
4420 "size %d\n",
4421 notif->size);
4422 ieee80211_rx_mgt(priv->
4423 ieee,
4424 (struct
4425 ieee80211_hdr_4addr
4426 *)
4427 &notif->u.raw, &stats);
4428 }
4429 }
4430 #endif
4431
4432 schedule_work(&priv->link_up);
4433
4434 break;
4435 }
4436
4437 case CMAS_AUTHENTICATED:{
4438 if (priv->
4439 status & (STATUS_ASSOCIATED |
4440 STATUS_AUTH)) {
4441 struct notif_authenticate *auth
4442 = &notif->u.auth;
4443 IPW_DEBUG(IPW_DL_NOTIF |
4444 IPW_DL_STATE |
4445 IPW_DL_ASSOC,
4446 "deauthenticated: '%s' "
4447 MAC_FMT
4448 ": (0x%04X) - %s \n",
4449 escape_essid(priv->
4450 essid,
4451 priv->
4452 essid_len),
4453 MAC_ARG(priv->bssid),
4454 ntohs(auth->status),
4455 ipw_get_status_code
4456 (ntohs
4457 (auth->status)));
4458
4459 priv->status &=
4460 ~(STATUS_ASSOCIATING |
4461 STATUS_AUTH |
4462 STATUS_ASSOCIATED);
4463
4464 schedule_work(&priv->link_down);
4465 break;
4466 }
4467
4468 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4469 IPW_DL_ASSOC,
4470 "authenticated: '%s' " MAC_FMT
4471 "\n",
4472 escape_essid(priv->essid,
4473 priv->essid_len),
4474 MAC_ARG(priv->bssid));
4475 break;
4476 }
4477
4478 case CMAS_INIT:{
4479 if (priv->status & STATUS_AUTH) {
4480 struct
4481 ieee80211_assoc_response
4482 *resp;
4483 resp =
4484 (struct
4485 ieee80211_assoc_response
4486 *)&notif->u.raw;
4487 IPW_DEBUG(IPW_DL_NOTIF |
4488 IPW_DL_STATE |
4489 IPW_DL_ASSOC,
4490 "association failed (0x%04X): %s\n",
4491 ntohs(resp->status),
4492 ipw_get_status_code
4493 (ntohs
4494 (resp->status)));
4495 }
4496
4497 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4498 IPW_DL_ASSOC,
4499 "disassociated: '%s' " MAC_FMT
4500 " \n",
4501 escape_essid(priv->essid,
4502 priv->essid_len),
4503 MAC_ARG(priv->bssid));
4504
4505 priv->status &=
4506 ~(STATUS_DISASSOCIATING |
4507 STATUS_ASSOCIATING |
4508 STATUS_ASSOCIATED | STATUS_AUTH);
4509 if (priv->assoc_network
4510 && (priv->assoc_network->
4511 capability &
4512 WLAN_CAPABILITY_IBSS))
4513 ipw_remove_current_network
4514 (priv);
4515
4516 schedule_work(&priv->link_down);
4517
4518 break;
4519 }
4520
4521 case CMAS_RX_ASSOC_RESP:
4522 break;
4523
4524 default:
4525 IPW_ERROR("assoc: unknown (%d)\n",
4526 assoc->state);
4527 break;
4528 }
4529
4530 break;
4531 }
4532
4533 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4534 struct notif_authenticate *auth = &notif->u.auth;
4535 switch (auth->state) {
4536 case CMAS_AUTHENTICATED:
4537 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4538 "authenticated: '%s' " MAC_FMT " \n",
4539 escape_essid(priv->essid,
4540 priv->essid_len),
4541 MAC_ARG(priv->bssid));
4542 priv->status |= STATUS_AUTH;
4543 break;
4544
4545 case CMAS_INIT:
4546 if (priv->status & STATUS_AUTH) {
4547 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4548 IPW_DL_ASSOC,
4549 "authentication failed (0x%04X): %s\n",
4550 ntohs(auth->status),
4551 ipw_get_status_code(ntohs
4552 (auth->
4553 status)));
4554 }
4555 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4556 IPW_DL_ASSOC,
4557 "deauthenticated: '%s' " MAC_FMT "\n",
4558 escape_essid(priv->essid,
4559 priv->essid_len),
4560 MAC_ARG(priv->bssid));
4561
4562 priv->status &= ~(STATUS_ASSOCIATING |
4563 STATUS_AUTH |
4564 STATUS_ASSOCIATED);
4565
4566 schedule_work(&priv->link_down);
4567 break;
4568
4569 case CMAS_TX_AUTH_SEQ_1:
4570 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4571 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4572 break;
4573 case CMAS_RX_AUTH_SEQ_2:
4574 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4575 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4576 break;
4577 case CMAS_AUTH_SEQ_1_PASS:
4578 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4579 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4580 break;
4581 case CMAS_AUTH_SEQ_1_FAIL:
4582 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4583 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4584 break;
4585 case CMAS_TX_AUTH_SEQ_3:
4586 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4587 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4588 break;
4589 case CMAS_RX_AUTH_SEQ_4:
4590 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4591 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4592 break;
4593 case CMAS_AUTH_SEQ_2_PASS:
4594 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4595 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4596 break;
4597 case CMAS_AUTH_SEQ_2_FAIL:
4598 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4599 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4600 break;
4601 case CMAS_TX_ASSOC:
4602 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4603 IPW_DL_ASSOC, "TX_ASSOC\n");
4604 break;
4605 case CMAS_RX_ASSOC_RESP:
4606 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4607 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4608
4609 break;
4610 case CMAS_ASSOCIATED:
4611 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4612 IPW_DL_ASSOC, "ASSOCIATED\n");
4613 break;
4614 default:
4615 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4616 auth->state);
4617 break;
4618 }
4619 break;
4620 }
4621
4622 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4623 struct notif_channel_result *x =
4624 &notif->u.channel_result;
4625
4626 if (notif->size == sizeof(*x)) {
4627 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4628 x->channel_num);
4629 } else {
4630 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4631 "(should be %zd)\n",
4632 notif->size, sizeof(*x));
4633 }
4634 break;
4635 }
4636
4637 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4638 struct notif_scan_complete *x = &notif->u.scan_complete;
4639 if (notif->size == sizeof(*x)) {
4640 IPW_DEBUG_SCAN
4641 ("Scan completed: type %d, %d channels, "
4642 "%d status\n", x->scan_type,
4643 x->num_channels, x->status);
4644 } else {
4645 IPW_ERROR("Scan completed of wrong size %d "
4646 "(should be %zd)\n",
4647 notif->size, sizeof(*x));
4648 }
4649
4650 priv->status &=
4651 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4652
4653 wake_up_interruptible(&priv->wait_state);
4654 cancel_delayed_work(&priv->scan_check);
4655
4656 if (priv->status & STATUS_EXIT_PENDING)
4657 break;
4658
4659 priv->ieee->scans++;
4660
4661 #ifdef CONFIG_IPW2200_MONITOR
4662 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4663 priv->status |= STATUS_SCAN_FORCED;
4664 queue_delayed_work(priv->workqueue,
4665 &priv->request_scan, 0);
4666 break;
4667 }
4668 priv->status &= ~STATUS_SCAN_FORCED;
4669 #endif /* CONFIG_IPW2200_MONITOR */
4670
4671 if (!(priv->status & (STATUS_ASSOCIATED |
4672 STATUS_ASSOCIATING |
4673 STATUS_ROAMING |
4674 STATUS_DISASSOCIATING)))
4675 queue_work(priv->workqueue, &priv->associate);
4676 else if (priv->status & STATUS_ROAMING) {
4677 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4678 /* If a scan completed and we are in roam mode, then
4679 * the scan that completed was the one requested as a
4680 * result of entering roam... so, schedule the
4681 * roam work */
4682 queue_work(priv->workqueue,
4683 &priv->roam);
4684 else
4685 /* Don't schedule if we aborted the scan */
4686 priv->status &= ~STATUS_ROAMING;
4687 } else if (priv->status & STATUS_SCAN_PENDING)
4688 queue_delayed_work(priv->workqueue,
4689 &priv->request_scan, 0);
4690 else if (priv->config & CFG_BACKGROUND_SCAN
4691 && priv->status & STATUS_ASSOCIATED)
4692 queue_delayed_work(priv->workqueue,
4693 &priv->request_scan,
4694 round_jiffies(HZ));
4695
4696 /* Send an empty event to user space.
4697 * We don't send the received data on the event because
4698 * it would require us to do complex transcoding, and
4699 * we want to minimise the work done in the irq handler.
4700 * Use a request to extract the data.
4701 * Also, we generate this event for any scan, regardless
4702 * of how the scan was initiated. User space can just
4703 * sync on the periodic scan to get fresh data...
4704 * Jean II */
4705 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4706 union iwreq_data wrqu;
4707
4708 wrqu.data.length = 0;
4709 wrqu.data.flags = 0;
4710 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4711 &wrqu, NULL);
4712 }
4713 break;
4714 }
4715
4716 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4717 struct notif_frag_length *x = &notif->u.frag_len;
4718
4719 if (notif->size == sizeof(*x))
4720 IPW_ERROR("Frag length: %d\n",
4721 le16_to_cpu(x->frag_length));
4722 else
4723 IPW_ERROR("Frag length of wrong size %d "
4724 "(should be %zd)\n",
4725 notif->size, sizeof(*x));
4726 break;
4727 }
4728
4729 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4730 struct notif_link_deterioration *x =
4731 &notif->u.link_deterioration;
4732
4733 if (notif->size == sizeof(*x)) {
4734 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4735 "link deterioration: type %d, cnt %d\n",
4736 x->silence_notification_type,
4737 x->silence_count);
4738 memcpy(&priv->last_link_deterioration, x,
4739 sizeof(*x));
4740 } else {
4741 IPW_ERROR("Link Deterioration of wrong size %d "
4742 "(should be %zd)\n",
4743 notif->size, sizeof(*x));
4744 }
4745 break;
4746 }
4747
4748 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4749 IPW_ERROR("Dino config\n");
4750 if (priv->hcmd
4751 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4752 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4753
4754 break;
4755 }
4756
4757 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4758 struct notif_beacon_state *x = &notif->u.beacon_state;
4759 if (notif->size != sizeof(*x)) {
4760 IPW_ERROR
4761 ("Beacon state of wrong size %d (should "
4762 "be %zd)\n", notif->size, sizeof(*x));
4763 break;
4764 }
4765
4766 if (le32_to_cpu(x->state) ==
4767 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4768 ipw_handle_missed_beacon(priv,
4769 le32_to_cpu(x->
4770 number));
4771
4772 break;
4773 }
4774
4775 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4776 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4777 if (notif->size == sizeof(*x)) {
4778 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4779 "0x%02x station %d\n",
4780 x->key_state, x->security_type,
4781 x->station_index);
4782 break;
4783 }
4784
4785 IPW_ERROR
4786 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4787 notif->size, sizeof(*x));
4788 break;
4789 }
4790
4791 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4792 struct notif_calibration *x = &notif->u.calibration;
4793
4794 if (notif->size == sizeof(*x)) {
4795 memcpy(&priv->calib, x, sizeof(*x));
4796 IPW_DEBUG_INFO("TODO: Calibration\n");
4797 break;
4798 }
4799
4800 IPW_ERROR
4801 ("Calibration of wrong size %d (should be %zd)\n",
4802 notif->size, sizeof(*x));
4803 break;
4804 }
4805
4806 case HOST_NOTIFICATION_NOISE_STATS:{
4807 if (notif->size == sizeof(u32)) {
4808 priv->exp_avg_noise =
4809 exponential_average(priv->exp_avg_noise,
4810 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4811 DEPTH_NOISE);
4812 break;
4813 }
4814
4815 IPW_ERROR
4816 ("Noise stat is wrong size %d (should be %zd)\n",
4817 notif->size, sizeof(u32));
4818 break;
4819 }
4820
4821 default:
4822 IPW_DEBUG_NOTIF("Unknown notification: "
4823 "subtype=%d,flags=0x%2x,size=%d\n",
4824 notif->subtype, notif->flags, notif->size);
4825 }
4826 }
4827
4828 /**
4829 * Destroys all DMA structures and initialises them again
4830 *
4831 * @param priv
4832 * @return error code
4833 */
4834 static int ipw_queue_reset(struct ipw_priv *priv)
4835 {
4836 int rc = 0;
4837 /** @todo customize queue sizes */
4838 int nTx = 64, nTxCmd = 8;
4839 ipw_tx_queue_free(priv);
4840 /* Tx CMD queue */
4841 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4842 IPW_TX_CMD_QUEUE_READ_INDEX,
4843 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4844 IPW_TX_CMD_QUEUE_BD_BASE,
4845 IPW_TX_CMD_QUEUE_BD_SIZE);
4846 if (rc) {
4847 IPW_ERROR("Tx Cmd queue init failed\n");
4848 goto error;
4849 }
4850 /* Tx queue(s) */
4851 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4852 IPW_TX_QUEUE_0_READ_INDEX,
4853 IPW_TX_QUEUE_0_WRITE_INDEX,
4854 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4855 if (rc) {
4856 IPW_ERROR("Tx 0 queue init failed\n");
4857 goto error;
4858 }
4859 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4860 IPW_TX_QUEUE_1_READ_INDEX,
4861 IPW_TX_QUEUE_1_WRITE_INDEX,
4862 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4863 if (rc) {
4864 IPW_ERROR("Tx 1 queue init failed\n");
4865 goto error;
4866 }
4867 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4868 IPW_TX_QUEUE_2_READ_INDEX,
4869 IPW_TX_QUEUE_2_WRITE_INDEX,
4870 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4871 if (rc) {
4872 IPW_ERROR("Tx 2 queue init failed\n");
4873 goto error;
4874 }
4875 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4876 IPW_TX_QUEUE_3_READ_INDEX,
4877 IPW_TX_QUEUE_3_WRITE_INDEX,
4878 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4879 if (rc) {
4880 IPW_ERROR("Tx 3 queue init failed\n");
4881 goto error;
4882 }
4883 /* statistics */
4884 priv->rx_bufs_min = 0;
4885 priv->rx_pend_max = 0;
4886 return rc;
4887
4888 error:
4889 ipw_tx_queue_free(priv);
4890 return rc;
4891 }
4892
4893 /**
4894 * Reclaim Tx queue entries that are no longer used by the NIC.
4895 *
4896 * When the FW advances the 'R' index, all entries between the old and
4897 * new 'R' index need to be reclaimed. As a result, some free space
4898 * becomes available. If there is enough free space (> low mark), wake the Tx queue.
4899 *
4900 * @note Need to protect against garbage in 'R' index
4901 * @param priv
4902 * @param txq
4903 * @param qindex
4904 * @return Number of used entries remaining in the queue
4905 */
4906 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4907 struct clx2_tx_queue *txq, int qindex)
4908 {
4909 u32 hw_tail;
4910 int used;
4911 struct clx2_queue *q = &txq->q;
4912
4913 hw_tail = ipw_read32(priv, q->reg_r);
4914 if (hw_tail >= q->n_bd) {
4915 IPW_ERROR
4916 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4917 hw_tail, q->n_bd);
4918 goto done;
4919 }
4920 for (; q->last_used != hw_tail;
4921 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4922 ipw_queue_tx_free_tfd(priv, txq);
4923 priv->tx_packets++;
4924 }
4925 done:
4926 if ((ipw_queue_space(q) > q->low_mark) &&
4927 (qindex >= 0) &&
4928 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4929 netif_wake_queue(priv->net_dev);
4930 used = q->first_empty - q->last_used;
4931 if (used < 0)
4932 used += q->n_bd;
4933
4934 return used;
4935 }
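/* Illustrative example (not part of the driver) of the wrap arithmetic used
 * above: with n_bd = 64, first_empty = 5 and last_used = 60, the raw
 * difference 5 - 60 = -55 is brought back into range by adding n_bd,
 * giving 9 entries still owned by the NIC. ipw_queue_inc_wrap() is used in
 * the loop above to advance last_used around the ring in the same way.
 */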
4936
4937 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4938 int len, int sync)
4939 {
4940 struct clx2_tx_queue *txq = &priv->txq_cmd;
4941 struct clx2_queue *q = &txq->q;
4942 struct tfd_frame *tfd;
4943
4944 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4945 IPW_ERROR("No space for Tx\n");
4946 return -EBUSY;
4947 }
4948
4949 tfd = &txq->bd[q->first_empty];
4950 txq->txb[q->first_empty] = NULL;
4951
4952 memset(tfd, 0, sizeof(*tfd));
4953 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4954 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4955 priv->hcmd_seq++;
4956 tfd->u.cmd.index = hcmd;
4957 tfd->u.cmd.length = len;
4958 memcpy(tfd->u.cmd.payload, buf, len);
4959 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4960 ipw_write32(priv, q->reg_w, q->first_empty);
4961 _ipw_read32(priv, 0x90);
4962
4963 return 0;
4964 }
4965
4966 /*
4967 * Rx theory of operation
4968 *
4969 * The host allocates 32 DMA target addresses and passes the host address
4970 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4971 * 0 to 31
4972 *
4973 * Rx Queue Indexes
4974 * The host/firmware share two index registers for managing the Rx buffers.
4975 *
4976 * The READ index maps to the first position that the firmware may be writing
4977 * to -- the driver can read up to (but not including) this position and get
4978 * good data.
4979 * The READ index is managed by the firmware once the card is enabled.
4980 *
4981 * The WRITE index maps to the last position the driver has read from -- the
4982 * position preceding WRITE is the last slot the firmware can place a packet.
4983 *
4984 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4985 * WRITE = READ.
4986 *
4987 * During initialization the host sets up the READ queue position to the first
4988 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4989 *
4990 * When the firmware places a packet in a buffer it will advance the READ index
4991 * and fire the RX interrupt. The driver can then query the READ index and
4992 * process as many packets as possible, moving the WRITE index forward as it
4993 * resets the Rx queue buffers with new memory.
4994 *
4995 * The management in the driver is as follows:
4996 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4997 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4998 * to replenish the ipw->rxq->rx_free.
4999 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5000 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5001 * 'processed' and 'read' driver indexes as well)
5002 * + A received packet is processed and handed to the kernel network stack,
5003 * detached from the ipw->rxq. The driver 'processed' index is updated.
5004 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5005 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5006 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5007 * were enough free buffers and RX_STALLED is set it is cleared.
5008 *
5009 *
5010 * Driver sequence:
5011 *
5012 * ipw_rx_queue_alloc() Allocates rx_free
5013 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5014 * ipw_rx_queue_restock
5015 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5016 * queue, updates firmware pointers, and updates
5017 * the WRITE index. If insufficient rx_free buffers
5018 * are available, schedules ipw_rx_queue_replenish
5019 *
5020 * -- enable interrupts --
5021 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5022 * READ INDEX, detaching the SKB from the pool.
5023 * Moves the packet buffer from queue to rx_used.
5024 * Calls ipw_rx_queue_restock to refill any empty
5025 * slots.
5026 * ...
5027 *
5028 */
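/*
 * Illustrative sketch (not part of the driver) of the index rules described
 * above, for a queue of RX_QUEUE_SIZE slots:
 *
 *   READ = 10, WRITE = 9  -> empty: no received data is waiting for the
 *                            driver to process
 *   READ = 10, WRITE = 10 -> full : the driver has restocked every slot it
 *                            has processed
 *
 * This also explains the initialisation in ipw_rx_queue_alloc() below, where
 * read = write = 0 and processed = RX_QUEUE_SIZE - 1: everything is treated
 * as already consumed but not yet restocked, so the first replenish pass
 * fills the whole ring.
 */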
5029
5030 /*
5031 * If there are slots in the RX queue that need to be restocked,
5032 * and we have free pre-allocated buffers, fill the ranks as much
5033 * as we can pulling from rx_free.
5034 *
5035 * This moves the 'write' index forward to catch up with 'processed', and
5036 * also updates the memory address in the firmware to reference the new
5037 * target buffer.
5038 */
5039 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5040 {
5041 struct ipw_rx_queue *rxq = priv->rxq;
5042 struct list_head *element;
5043 struct ipw_rx_mem_buffer *rxb;
5044 unsigned long flags;
5045 int write;
5046
5047 spin_lock_irqsave(&rxq->lock, flags);
5048 write = rxq->write;
5049 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5050 element = rxq->rx_free.next;
5051 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5052 list_del(element);
5053
5054 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5055 rxb->dma_addr);
5056 rxq->queue[rxq->write] = rxb;
5057 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5058 rxq->free_count--;
5059 }
5060 spin_unlock_irqrestore(&rxq->lock, flags);
5061
5062 /* If the pre-allocated buffer pool is dropping low, schedule to
5063 * refill it */
5064 if (rxq->free_count <= RX_LOW_WATERMARK)
5065 queue_work(priv->workqueue, &priv->rx_replenish);
5066
5067 /* If we've added more space for the firmware to place data, tell it */
5068 if (write != rxq->write)
5069 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5070 }
5071
5072 /*
5073 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5074 * Also restock the Rx queue via ipw_rx_queue_restock.
5075 *
5076 * This is called as a scheduled work item (except during initialization)
5077 */
5078 static void ipw_rx_queue_replenish(void *data)
5079 {
5080 struct ipw_priv *priv = data;
5081 struct ipw_rx_queue *rxq = priv->rxq;
5082 struct list_head *element;
5083 struct ipw_rx_mem_buffer *rxb;
5084 unsigned long flags;
5085
5086 spin_lock_irqsave(&rxq->lock, flags);
5087 while (!list_empty(&rxq->rx_used)) {
5088 element = rxq->rx_used.next;
5089 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5090 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5091 if (!rxb->skb) {
5092 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5093 priv->net_dev->name);
5094 /* We don't reschedule replenish work here -- we will
5095 * call the restock method and if it still needs
5096 * more buffers it will schedule replenish */
5097 break;
5098 }
5099 list_del(element);
5100
5101 rxb->dma_addr =
5102 pci_map_single(priv->pci_dev, rxb->skb->data,
5103 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5104
5105 list_add_tail(&rxb->list, &rxq->rx_free);
5106 rxq->free_count++;
5107 }
5108 spin_unlock_irqrestore(&rxq->lock, flags);
5109
5110 ipw_rx_queue_restock(priv);
5111 }
5112
5113 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5114 {
5115 struct ipw_priv *priv =
5116 container_of(work, struct ipw_priv, rx_replenish);
5117 mutex_lock(&priv->mutex);
5118 ipw_rx_queue_replenish(priv);
5119 mutex_unlock(&priv->mutex);
5120 }
5121
5122 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5123 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5124 * This free routine walks the list of POOL entries and, if the SKB is
5125 * non-NULL, it is unmapped and freed.
5126 */
5127 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5128 {
5129 int i;
5130
5131 if (!rxq)
5132 return;
5133
5134 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5135 if (rxq->pool[i].skb != NULL) {
5136 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5137 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5138 dev_kfree_skb(rxq->pool[i].skb);
5139 }
5140 }
5141
5142 kfree(rxq);
5143 }
5144
5145 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5146 {
5147 struct ipw_rx_queue *rxq;
5148 int i;
5149
5150 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5151 if (unlikely(!rxq)) {
5152 IPW_ERROR("memory allocation failed\n");
5153 return NULL;
5154 }
5155 spin_lock_init(&rxq->lock);
5156 INIT_LIST_HEAD(&rxq->rx_free);
5157 INIT_LIST_HEAD(&rxq->rx_used);
5158
5159 /* Fill the rx_used queue with _all_ of the Rx buffers */
5160 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5161 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5162
5163 /* Set us so that we have processed and used all buffers, but have
5164 * not restocked the Rx queue with fresh buffers */
5165 rxq->read = rxq->write = 0;
5166 rxq->processed = RX_QUEUE_SIZE - 1;
5167 rxq->free_count = 0;
5168
5169 return rxq;
5170 }
5171
5172 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5173 {
5174 rate &= ~IEEE80211_BASIC_RATE_MASK;
5175 if (ieee_mode == IEEE_A) {
5176 switch (rate) {
5177 case IEEE80211_OFDM_RATE_6MB:
5178 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5179 1 : 0;
5180 case IEEE80211_OFDM_RATE_9MB:
5181 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5182 1 : 0;
5183 case IEEE80211_OFDM_RATE_12MB:
5184 return priv->
5185 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5186 case IEEE80211_OFDM_RATE_18MB:
5187 return priv->
5188 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5189 case IEEE80211_OFDM_RATE_24MB:
5190 return priv->
5191 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5192 case IEEE80211_OFDM_RATE_36MB:
5193 return priv->
5194 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5195 case IEEE80211_OFDM_RATE_48MB:
5196 return priv->
5197 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5198 case IEEE80211_OFDM_RATE_54MB:
5199 return priv->
5200 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5201 default:
5202 return 0;
5203 }
5204 }
5205
5206 /* B and G mixed */
5207 switch (rate) {
5208 case IEEE80211_CCK_RATE_1MB:
5209 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5210 case IEEE80211_CCK_RATE_2MB:
5211 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5212 case IEEE80211_CCK_RATE_5MB:
5213 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5214 case IEEE80211_CCK_RATE_11MB:
5215 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5216 }
5217
5218 /* If we are limited to B modulations, bail at this point */
5219 if (ieee_mode == IEEE_B)
5220 return 0;
5221
5222 /* G */
5223 switch (rate) {
5224 case IEEE80211_OFDM_RATE_6MB:
5225 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5226 case IEEE80211_OFDM_RATE_9MB:
5227 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5228 case IEEE80211_OFDM_RATE_12MB:
5229 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5230 case IEEE80211_OFDM_RATE_18MB:
5231 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5232 case IEEE80211_OFDM_RATE_24MB:
5233 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5234 case IEEE80211_OFDM_RATE_36MB:
5235 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5236 case IEEE80211_OFDM_RATE_48MB:
5237 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5238 case IEEE80211_OFDM_RATE_54MB:
5239 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5240 }
5241
5242 return 0;
5243 }
5244
5245 static int ipw_compatible_rates(struct ipw_priv *priv,
5246 const struct ieee80211_network *network,
5247 struct ipw_supported_rates *rates)
5248 {
5249 int num_rates, i;
5250
5251 memset(rates, 0, sizeof(*rates));
5252 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5253 rates->num_rates = 0;
5254 for (i = 0; i < num_rates; i++) {
5255 if (!ipw_is_rate_in_mask(priv, network->mode,
5256 network->rates[i])) {
5257
5258 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5259 IPW_DEBUG_SCAN("Adding masked mandatory "
5260 "rate %02X\n",
5261 network->rates[i]);
5262 rates->supported_rates[rates->num_rates++] =
5263 network->rates[i];
5264 continue;
5265 }
5266
5267 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5268 network->rates[i], priv->rates_mask);
5269 continue;
5270 }
5271
5272 rates->supported_rates[rates->num_rates++] = network->rates[i];
5273 }
5274
5275 num_rates = min(network->rates_ex_len,
5276 (u8) (IPW_MAX_RATES - num_rates));
5277 for (i = 0; i < num_rates; i++) {
5278 if (!ipw_is_rate_in_mask(priv, network->mode,
5279 network->rates_ex[i])) {
5280 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5281 IPW_DEBUG_SCAN("Adding masked mandatory "
5282 "rate %02X\n",
5283 network->rates_ex[i]);
5284 rates->supported_rates[rates->num_rates++] =
5285 network->rates_ex[i];
5286 continue;
5287 }
5288
5289 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5290 network->rates_ex[i], priv->rates_mask);
5291 continue;
5292 }
5293
5294 rates->supported_rates[rates->num_rates++] =
5295 network->rates_ex[i];
5296 }
5297
5298 return 1;
5299 }
5300
5301 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5302 const struct ipw_supported_rates *src)
5303 {
5304 u8 i;
5305 for (i = 0; i < src->num_rates; i++)
5306 dest->supported_rates[i] = src->supported_rates[i];
5307 dest->num_rates = src->num_rates;
5308 }
5309
5310 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5311 * mask should ever be used -- right now all callers that add the scan rates
5312 * pass modulation = CCK, so BASIC_RATE_MASK is never set... */
5313 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5314 u8 modulation, u32 rate_mask)
5315 {
5316 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5317 IEEE80211_BASIC_RATE_MASK : 0;
5318
5319 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5320 rates->supported_rates[rates->num_rates++] =
5321 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5322
5323 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5324 rates->supported_rates[rates->num_rates++] =
5325 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5326
5327 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5328 rates->supported_rates[rates->num_rates++] = basic_mask |
5329 IEEE80211_CCK_RATE_5MB;
5330
5331 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5332 rates->supported_rates[rates->num_rates++] = basic_mask |
5333 IEEE80211_CCK_RATE_11MB;
5334 }
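/* Illustrative note (not part of the driver): supported-rate octets are in
 * units of 500 kb/s, with bit 7 (IEEE80211_BASIC_RATE_MASK, 0x80) marking a
 * basic rate. For example, 1 Mb/s CCK is 0x02 and becomes 0x82 when flagged
 * as basic; 11 Mb/s CCK is 0x16 (22 * 500 kb/s) and becomes 0x96.
 */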
5335
5336 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5337 u8 modulation, u32 rate_mask)
5338 {
5339 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5340 IEEE80211_BASIC_RATE_MASK : 0;
5341
5342 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5343 rates->supported_rates[rates->num_rates++] = basic_mask |
5344 IEEE80211_OFDM_RATE_6MB;
5345
5346 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5347 rates->supported_rates[rates->num_rates++] =
5348 IEEE80211_OFDM_RATE_9MB;
5349
5350 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5351 rates->supported_rates[rates->num_rates++] = basic_mask |
5352 IEEE80211_OFDM_RATE_12MB;
5353
5354 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5355 rates->supported_rates[rates->num_rates++] =
5356 IEEE80211_OFDM_RATE_18MB;
5357
5358 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5359 rates->supported_rates[rates->num_rates++] = basic_mask |
5360 IEEE80211_OFDM_RATE_24MB;
5361
5362 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5363 rates->supported_rates[rates->num_rates++] =
5364 IEEE80211_OFDM_RATE_36MB;
5365
5366 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5367 rates->supported_rates[rates->num_rates++] =
5368 IEEE80211_OFDM_RATE_48MB;
5369
5370 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5371 rates->supported_rates[rates->num_rates++] =
5372 IEEE80211_OFDM_RATE_54MB;
5373 }
5374
5375 struct ipw_network_match {
5376 struct ieee80211_network *network;
5377 struct ipw_supported_rates rates;
5378 };
5379
5380 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5381 struct ipw_network_match *match,
5382 struct ieee80211_network *network,
5383 int roaming)
5384 {
5385 struct ipw_supported_rates rates;
5386
5387 /* Verify that this network's capability is compatible with the
5388 * current mode (AdHoc or Infrastructure) */
5389 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5390 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5391 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5392 "capability mismatch.\n",
5393 escape_essid(network->ssid, network->ssid_len),
5394 MAC_ARG(network->bssid));
5395 return 0;
5396 }
5397
5398 /* If we do not have an ESSID for this AP, we can not associate with
5399 * it */
5400 if (network->flags & NETWORK_EMPTY_ESSID) {
5401 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5402 "because of hidden ESSID.\n",
5403 escape_essid(network->ssid, network->ssid_len),
5404 MAC_ARG(network->bssid));
5405 return 0;
5406 }
5407
5408 if (unlikely(roaming)) {
5409 /* If we are roaming, check whether this is a valid
5410 * network to try to roam to */
5411 if ((network->ssid_len != match->network->ssid_len) ||
5412 memcmp(network->ssid, match->network->ssid,
5413 network->ssid_len)) {
5414 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5415 "because of non-network ESSID.\n",
5416 escape_essid(network->ssid,
5417 network->ssid_len),
5418 MAC_ARG(network->bssid));
5419 return 0;
5420 }
5421 } else {
5422 /* If an ESSID has been configured then compare the broadcast
5423 * ESSID to ours */
5424 if ((priv->config & CFG_STATIC_ESSID) &&
5425 ((network->ssid_len != priv->essid_len) ||
5426 memcmp(network->ssid, priv->essid,
5427 min(network->ssid_len, priv->essid_len)))) {
5428 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5429
5430 strncpy(escaped,
5431 escape_essid(network->ssid, network->ssid_len),
5432 sizeof(escaped));
5433 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5434 "because of ESSID mismatch: '%s'.\n",
5435 escaped, MAC_ARG(network->bssid),
5436 escape_essid(priv->essid,
5437 priv->essid_len));
5438 return 0;
5439 }
5440 }
5441
5442 /* If the candidate network's TSF is lower (i.e. it is newer) than
5443 * the current network's, don't bother testing everything else. */
5444
5445 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5446 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5447 "current network.\n",
5448 escape_essid(match->network->ssid,
5449 match->network->ssid_len));
5450 return 0;
5451 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5452 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5453 "current network.\n",
5454 escape_essid(match->network->ssid,
5455 match->network->ssid_len));
5456 return 0;
5457 }
5458
5459 /* Now go through and see if the requested network is valid... */
5460 if (priv->ieee->scan_age != 0 &&
5461 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5462 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5463 "because of age: %ums.\n",
5464 escape_essid(network->ssid, network->ssid_len),
5465 MAC_ARG(network->bssid),
5466 jiffies_to_msecs(jiffies -
5467 network->last_scanned));
5468 return 0;
5469 }
5470
5471 if ((priv->config & CFG_STATIC_CHANNEL) &&
5472 (network->channel != priv->channel)) {
5473 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5474 "because of channel mismatch: %d != %d.\n",
5475 escape_essid(network->ssid, network->ssid_len),
5476 MAC_ARG(network->bssid),
5477 network->channel, priv->channel);
5478 return 0;
5479 }
5480
5481 /* Verify privacy compatibility */
5482 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5483 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5484 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5485 "because of privacy mismatch: %s != %s.\n",
5486 escape_essid(network->ssid, network->ssid_len),
5487 MAC_ARG(network->bssid),
5488 priv->
5489 capability & CAP_PRIVACY_ON ? "on" : "off",
5490 network->
5491 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5492 "off");
5493 return 0;
5494 }
5495
5496 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5497 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5498 "because of the same BSSID match: " MAC_FMT
5499 ".\n", escape_essid(network->ssid,
5500 network->ssid_len),
5501 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5502 return 0;
5503 }
5504
5505 /* Filter out any incompatible freq / mode combinations */
5506 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5507 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5508 "because of invalid frequency/mode "
5509 "combination.\n",
5510 escape_essid(network->ssid, network->ssid_len),
5511 MAC_ARG(network->bssid));
5512 return 0;
5513 }
5514
5515 /* Ensure that the rates supported by the driver are compatible with
5516 * this AP, including verification of basic rates (mandatory) */
5517 if (!ipw_compatible_rates(priv, network, &rates)) {
5518 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5519 "because configured rate mask excludes "
5520 "AP mandatory rate.\n",
5521 escape_essid(network->ssid, network->ssid_len),
5522 MAC_ARG(network->bssid));
5523 return 0;
5524 }
5525
5526 if (rates.num_rates == 0) {
5527 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5528 "because of no compatible rates.\n",
5529 escape_essid(network->ssid, network->ssid_len),
5530 MAC_ARG(network->bssid));
5531 return 0;
5532 }
5533
5534 /* TODO: Perform any further minimal comparative tests. We do not
5535 * want to put too much policy logic here; intelligent scan selection
5536 * should occur within a generic IEEE 802.11 user space tool. */
5537
5538 /* Set up 'new' AP to this network */
5539 ipw_copy_rates(&match->rates, &rates);
5540 match->network = network;
5541 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5542 escape_essid(network->ssid, network->ssid_len),
5543 MAC_ARG(network->bssid));
5544
5545 return 1;
5546 }
5547
5548 static void ipw_merge_adhoc_network(struct work_struct *work)
5549 {
5550 struct ipw_priv *priv =
5551 container_of(work, struct ipw_priv, merge_networks);
5552 struct ieee80211_network *network = NULL;
5553 struct ipw_network_match match = {
5554 .network = priv->assoc_network
5555 };
5556
5557 if ((priv->status & STATUS_ASSOCIATED) &&
5558 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5559 /* First pass through ROAM process -- look for a better
5560 * network */
5561 unsigned long flags;
5562
5563 spin_lock_irqsave(&priv->ieee->lock, flags);
5564 list_for_each_entry(network, &priv->ieee->network_list, list) {
5565 if (network != priv->assoc_network)
5566 ipw_find_adhoc_network(priv, &match, network,
5567 1);
5568 }
5569 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5570
5571 if (match.network == priv->assoc_network) {
5572 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5573 "merge to.\n");
5574 return;
5575 }
5576
5577 mutex_lock(&priv->mutex);
5578 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5579 IPW_DEBUG_MERGE("remove network %s\n",
5580 escape_essid(priv->essid,
5581 priv->essid_len));
5582 ipw_remove_current_network(priv);
5583 }
5584
5585 ipw_disassociate(priv);
5586 priv->assoc_network = match.network;
5587 mutex_unlock(&priv->mutex);
5588 return;
5589 }
5590 }
5591
5592 static int ipw_best_network(struct ipw_priv *priv,
5593 struct ipw_network_match *match,
5594 struct ieee80211_network *network, int roaming)
5595 {
5596 struct ipw_supported_rates rates;
5597
5598 /* Verify that this network's capability is compatible with the
5599 * current mode (AdHoc or Infrastructure) */
5600 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5601 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5602 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5603 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5604 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5605 "capability mismatch.\n",
5606 escape_essid(network->ssid, network->ssid_len),
5607 MAC_ARG(network->bssid));
5608 return 0;
5609 }
5610
5611 /* If we do not have an ESSID for this AP, we can not associate with
5612 * it */
5613 if (network->flags & NETWORK_EMPTY_ESSID) {
5614 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5615 "because of hidden ESSID.\n",
5616 escape_essid(network->ssid, network->ssid_len),
5617 MAC_ARG(network->bssid));
5618 return 0;
5619 }
5620
5621 if (unlikely(roaming)) {
5622 /* If we are roaming, check whether this is a valid
5623 * network to try to roam to */
5624 if ((network->ssid_len != match->network->ssid_len) ||
5625 memcmp(network->ssid, match->network->ssid,
5626 network->ssid_len)) {
5627 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5628 "because of non-network ESSID.\n",
5629 escape_essid(network->ssid,
5630 network->ssid_len),
5631 MAC_ARG(network->bssid));
5632 return 0;
5633 }
5634 } else {
5635 /* If an ESSID has been configured then compare the broadcast
5636 * ESSID to ours */
5637 if ((priv->config & CFG_STATIC_ESSID) &&
5638 ((network->ssid_len != priv->essid_len) ||
5639 memcmp(network->ssid, priv->essid,
5640 min(network->ssid_len, priv->essid_len)))) {
5641 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5642 strncpy(escaped,
5643 escape_essid(network->ssid, network->ssid_len),
5644 sizeof(escaped));
5645 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5646 "because of ESSID mismatch: '%s'.\n",
5647 escaped, MAC_ARG(network->bssid),
5648 escape_essid(priv->essid,
5649 priv->essid_len));
5650 return 0;
5651 }
5652 }
5653
5654 /* If the old network's signal is stronger than this one's, don't bother
5655 * testing everything else. */
5656 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5657 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5658 strncpy(escaped,
5659 escape_essid(network->ssid, network->ssid_len),
5660 sizeof(escaped));
5661 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5662 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5663 escaped, MAC_ARG(network->bssid),
5664 escape_essid(match->network->ssid,
5665 match->network->ssid_len),
5666 MAC_ARG(match->network->bssid));
5667 return 0;
5668 }
5669
5670 /* If this network has already had an association attempt within the
5671 * last 3 seconds, do not try to associate again... */
5672 if (network->last_associate &&
5673 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5674 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5675 "because of storming (%ums since last "
5676 "assoc attempt).\n",
5677 escape_essid(network->ssid, network->ssid_len),
5678 MAC_ARG(network->bssid),
5679 jiffies_to_msecs(jiffies -
5680 network->last_associate));
5681 return 0;
5682 }
5683
5684 /* Now go through and see if the requested network is valid... */
5685 if (priv->ieee->scan_age != 0 &&
5686 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5687 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5688 "because of age: %ums.\n",
5689 escape_essid(network->ssid, network->ssid_len),
5690 MAC_ARG(network->bssid),
5691 jiffies_to_msecs(jiffies -
5692 network->last_scanned));
5693 return 0;
5694 }
5695
5696 if ((priv->config & CFG_STATIC_CHANNEL) &&
5697 (network->channel != priv->channel)) {
5698 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5699 "because of channel mismatch: %d != %d.\n",
5700 escape_essid(network->ssid, network->ssid_len),
5701 MAC_ARG(network->bssid),
5702 network->channel, priv->channel);
5703 return 0;
5704 }
5705
5706 /* Verify privacy compatibility */
5707 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5708 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5709 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5710 "because of privacy mismatch: %s != %s.\n",
5711 escape_essid(network->ssid, network->ssid_len),
5712 MAC_ARG(network->bssid),
5713 priv->capability & CAP_PRIVACY_ON ? "on" :
5714 "off",
5715 network->capability &
5716 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5717 return 0;
5718 }
5719
5720 if ((priv->config & CFG_STATIC_BSSID) &&
5721 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5722 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5723 "because of BSSID mismatch: " MAC_FMT ".\n",
5724 escape_essid(network->ssid, network->ssid_len),
5725 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5726 return 0;
5727 }
5728
5729 /* Filter out any incompatible freq / mode combinations */
5730 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5731 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5732 "because of invalid frequency/mode "
5733 "combination.\n",
5734 escape_essid(network->ssid, network->ssid_len),
5735 MAC_ARG(network->bssid));
5736 return 0;
5737 }
5738
5739 /* Filter out invalid channel in current GEO */
5740 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5741 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5742 "because of invalid channel in current GEO\n",
5743 escape_essid(network->ssid, network->ssid_len),
5744 MAC_ARG(network->bssid));
5745 return 0;
5746 }
5747
5748 /* Ensure that the rates supported by the driver are compatible with
5749 * this AP, including verification of basic rates (mandatory) */
5750 if (!ipw_compatible_rates(priv, network, &rates)) {
5751 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5752 "because configured rate mask excludes "
5753 "AP mandatory rate.\n",
5754 escape_essid(network->ssid, network->ssid_len),
5755 MAC_ARG(network->bssid));
5756 return 0;
5757 }
5758
5759 if (rates.num_rates == 0) {
5760 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5761 "because of no compatible rates.\n",
5762 escape_essid(network->ssid, network->ssid_len),
5763 MAC_ARG(network->bssid));
5764 return 0;
5765 }
5766
5767 /* TODO: Perform any further minimal comparative tests. We do not
5768 * want to put too much policy logic here; intelligent scan selection
5769 * should occur within a generic IEEE 802.11 user space tool. */
5770
5771 /* Set up 'new' AP to this network */
5772 ipw_copy_rates(&match->rates, &rates);
5773 match->network = network;
5774
5775 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5776 escape_essid(network->ssid, network->ssid_len),
5777 MAC_ARG(network->bssid));
5778
5779 return 1;
5780 }
5781
5782 static void ipw_adhoc_create(struct ipw_priv *priv,
5783 struct ieee80211_network *network)
5784 {
5785 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5786 int i;
5787
5788 /*
5789 * For the purposes of scanning, we can set our wireless mode
5790 * to trigger scans across combinations of bands, but when it
5791 * comes to creating a new ad-hoc network, we have to tell the FW
5792 * exactly which band to use.
5793 *
5794 * We also have the possibility of an invalid channel for the
5795 * chosen band. Attempting to create a new ad-hoc network
5796 * with an invalid channel for wireless mode will trigger a
5797 * FW fatal error.
5798 *
5799 */
5800 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5801 case IEEE80211_52GHZ_BAND:
5802 network->mode = IEEE_A;
5803 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5804 BUG_ON(i == -1);
5805 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5806 IPW_WARNING("Overriding invalid channel\n");
5807 priv->channel = geo->a[0].channel;
5808 }
5809 break;
5810
5811 case IEEE80211_24GHZ_BAND:
5812 if (priv->ieee->mode & IEEE_G)
5813 network->mode = IEEE_G;
5814 else
5815 network->mode = IEEE_B;
5816 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5817 BUG_ON(i == -1);
5818 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5819 IPW_WARNING("Overriding invalid channel\n");
5820 priv->channel = geo->bg[0].channel;
5821 }
5822 break;
5823
5824 default:
5825 IPW_WARNING("Overriding invalid channel\n");
5826 if (priv->ieee->mode & IEEE_A) {
5827 network->mode = IEEE_A;
5828 priv->channel = geo->a[0].channel;
5829 } else if (priv->ieee->mode & IEEE_G) {
5830 network->mode = IEEE_G;
5831 priv->channel = geo->bg[0].channel;
5832 } else {
5833 network->mode = IEEE_B;
5834 priv->channel = geo->bg[0].channel;
5835 }
5836 break;
5837 }
5838
5839 network->channel = priv->channel;
5840 priv->config |= CFG_ADHOC_PERSIST;
5841 ipw_create_bssid(priv, network->bssid);
5842 network->ssid_len = priv->essid_len;
5843 memcpy(network->ssid, priv->essid, priv->essid_len);
5844 memset(&network->stats, 0, sizeof(network->stats));
5845 network->capability = WLAN_CAPABILITY_IBSS;
5846 if (!(priv->config & CFG_PREAMBLE_LONG))
5847 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5848 if (priv->capability & CAP_PRIVACY_ON)
5849 network->capability |= WLAN_CAPABILITY_PRIVACY;
5850 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5851 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5852 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5853 memcpy(network->rates_ex,
5854 &priv->rates.supported_rates[network->rates_len],
5855 network->rates_ex_len);
5856 network->last_scanned = 0;
5857 network->flags = 0;
5858 network->last_associate = 0;
5859 network->time_stamp[0] = 0;
5860 network->time_stamp[1] = 0;
5861 network->beacon_interval = 100; /* Default */
5862 network->listen_interval = 10; /* Default */
5863 network->atim_window = 0; /* Default */
5864 network->wpa_ie_len = 0;
5865 network->rsn_ie_len = 0;
5866 }
5867
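/*
 * Send the TKIP/CCMP transmit key at @index to the firmware
 * (IPW_CMD_TGI_TX_KEY), if such a key has been configured.
 */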
5868 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5869 {
5870 struct ipw_tgi_tx_key key;
5871
5872 if (!(priv->ieee->sec.flags & (1 << index)))
5873 return;
5874
5875 key.key_id = index;
5876 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5877 key.security_type = type;
5878 key.station_index = 0; /* always 0 for BSS */
5879 key.flags = 0;
5880 /* 0 for new key; previous value of counter (after fatal error) */
5881 key.tx_counter[0] = cpu_to_le32(0);
5882 key.tx_counter[1] = cpu_to_le32(0);
5883
5884 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5885 }
5886
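/*
 * Push the configured key slots of the given @type to the firmware via
 * IPW_CMD_WEP_KEY; slots without a configured key are skipped.
 */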
5887 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5888 {
5889 struct ipw_wep_key key;
5890 int i;
5891
5892 key.cmd_id = DINO_CMD_WEP_KEY;
5893 key.seq_num = 0;
5894
5895 /* Note: AES keys cannot be set multiple times;
5896 * only set them the first time. */
5897 for (i = 0; i < 4; i++) {
5898 key.key_index = i | type;
5899 if (!(priv->ieee->sec.flags & (1 << i))) {
5900 key.key_size = 0;
5901 continue;
5902 }
5903
5904 key.key_size = priv->ieee->sec.key_sizes[i];
5905 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5906
5907 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5908 }
5909 }
5910
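/*
 * Choose between hardware and host decryption of unicast frames for the
 * given security level (TKIP still needs host decryption).
 */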
5911 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5912 {
5913 if (priv->ieee->host_encrypt)
5914 return;
5915
5916 switch (level) {
5917 case SEC_LEVEL_3:
5918 priv->sys_config.disable_unicast_decryption = 0;
5919 priv->ieee->host_decrypt = 0;
5920 break;
5921 case SEC_LEVEL_2:
5922 priv->sys_config.disable_unicast_decryption = 1;
5923 priv->ieee->host_decrypt = 1;
5924 break;
5925 case SEC_LEVEL_1:
5926 priv->sys_config.disable_unicast_decryption = 0;
5927 priv->ieee->host_decrypt = 0;
5928 break;
5929 case SEC_LEVEL_0:
5930 priv->sys_config.disable_unicast_decryption = 1;
5931 break;
5932 default:
5933 break;
5934 }
5935 }
5936
5937 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5938 {
5939 if (priv->ieee->host_encrypt)
5940 return;
5941
5942 switch (level) {
5943 case SEC_LEVEL_3:
5944 priv->sys_config.disable_multicast_decryption = 0;
5945 break;
5946 case SEC_LEVEL_2:
5947 priv->sys_config.disable_multicast_decryption = 1;
5948 break;
5949 case SEC_LEVEL_1:
5950 priv->sys_config.disable_multicast_decryption = 0;
5951 break;
5952 case SEC_LEVEL_0:
5953 priv->sys_config.disable_multicast_decryption = 1;
5954 break;
5955 default:
5956 break;
5957 }
5958 }
5959
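/* Program the firmware with the keys matching the current security level. */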
5960 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5961 {
5962 switch (priv->ieee->sec.level) {
5963 case SEC_LEVEL_3:
5964 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5965 ipw_send_tgi_tx_key(priv,
5966 DCT_FLAG_EXT_SECURITY_CCM,
5967 priv->ieee->sec.active_key);
5968
5969 if (!priv->ieee->host_mc_decrypt)
5970 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5971 break;
5972 case SEC_LEVEL_2:
5973 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5974 ipw_send_tgi_tx_key(priv,
5975 DCT_FLAG_EXT_SECURITY_TKIP,
5976 priv->ieee->sec.active_key);
5977 break;
5978 case SEC_LEVEL_1:
5979 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5980 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5981 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5982 break;
5983 case SEC_LEVEL_0:
5984 default:
5985 break;
5986 }
5987 }
5988
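/*
 * Periodic ad-hoc watchdog: disassociate once too many beacons have been
 * missed (unless CFG_ADHOC_PERSIST is set), otherwise re-arm the check.
 */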
5989 static void ipw_adhoc_check(void *data)
5990 {
5991 struct ipw_priv *priv = data;
5992
5993 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5994 !(priv->config & CFG_ADHOC_PERSIST)) {
5995 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5996 IPW_DL_STATE | IPW_DL_ASSOC,
5997 "Missed beacon: %d - disassociate\n",
5998 priv->missed_adhoc_beacons);
5999 ipw_remove_current_network(priv);
6000 ipw_disassociate(priv);
6001 return;
6002 }
6003
6004 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6005 priv->assoc_request.beacon_interval);
6006 }
6007
6008 static void ipw_bg_adhoc_check(struct work_struct *work)
6009 {
6010 struct ipw_priv *priv =
6011 container_of(work, struct ipw_priv, adhoc_check.work);
6012 mutex_lock(&priv->mutex);
6013 ipw_adhoc_check(priv);
6014 mutex_unlock(&priv->mutex);
6015 }
6016
6017 static void ipw_debug_config(struct ipw_priv *priv)
6018 {
6019 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6020 "[CFG 0x%08X]\n", priv->config);
6021 if (priv->config & CFG_STATIC_CHANNEL)
6022 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6023 else
6024 IPW_DEBUG_INFO("Channel unlocked.\n");
6025 if (priv->config & CFG_STATIC_ESSID)
6026 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6027 escape_essid(priv->essid, priv->essid_len));
6028 else
6029 IPW_DEBUG_INFO("ESSID unlocked.\n");
6030 if (priv->config & CFG_STATIC_BSSID)
6031 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
6032 MAC_ARG(priv->bssid));
6033 else
6034 IPW_DEBUG_INFO("BSSID unlocked.\n");
6035 if (priv->capability & CAP_PRIVACY_ON)
6036 IPW_DEBUG_INFO("PRIVACY on\n");
6037 else
6038 IPW_DEBUG_INFO("PRIVACY off\n");
6039 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6040 }
6041
6042 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6043 {
6044 /* TODO: Verify that this works... */
6045 struct ipw_fixed_rate fr = {
6046 .tx_rates = priv->rates_mask
6047 };
6048 u32 reg;
6049 u16 mask = 0;
6050
6051 /* Identify 'current FW band' and match it with the fixed
6052 * Tx rates */
6053
6054 switch (priv->ieee->freq_band) {
6055 case IEEE80211_52GHZ_BAND: /* A only */
6056 /* IEEE_A */
6057 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6058 /* Invalid fixed rate mask */
6059 IPW_DEBUG_WX
6060 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6061 fr.tx_rates = 0;
6062 break;
6063 }
6064
6065 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6066 break;
6067
6068 default: /* 2.4Ghz or Mixed */
6069 /* IEEE_B */
6070 if (mode == IEEE_B) {
6071 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6072 /* Invalid fixed rate mask */
6073 IPW_DEBUG_WX
6074 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6075 fr.tx_rates = 0;
6076 }
6077 break;
6078 }
6079
6080 /* IEEE_G */
6081 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6082 IEEE80211_OFDM_RATES_MASK)) {
6083 /* Invalid fixed rate mask */
6084 IPW_DEBUG_WX
6085 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6086 fr.tx_rates = 0;
6087 break;
6088 }
6089
6090 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6091 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6092 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6093 }
6094
6095 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6096 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6097 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6098 }
6099
6100 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6101 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6102 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6103 }
6104
6105 fr.tx_rates |= mask;
6106 break;
6107 }
6108
6109 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6110 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6111 }
6112
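/*
 * Ask the firmware to abort the current scan, unless an abort request is
 * already in flight.
 */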
6113 static void ipw_abort_scan(struct ipw_priv *priv)
6114 {
6115 int err;
6116
6117 if (priv->status & STATUS_SCAN_ABORTING) {
6118 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6119 return;
6120 }
6121 priv->status |= STATUS_SCAN_ABORTING;
6122
6123 err = ipw_send_scan_abort(priv);
6124 if (err)
6125 IPW_DEBUG_HC("Request to abort scan failed.\n");
6126 }
6127
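/*
 * Fill the scan request's channel list for each supported band, forcing a
 * passive dwell on passive-only channels and skipping the channel we are
 * currently associated on.
 */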
6128 static void ipw_add_scan_channels(struct ipw_priv *priv,
6129 struct ipw_scan_request_ext *scan,
6130 int scan_type)
6131 {
6132 int channel_index = 0;
6133 const struct ieee80211_geo *geo;
6134 int i;
6135
6136 geo = ieee80211_get_geo(priv->ieee);
6137
6138 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6139 int start = channel_index;
6140 for (i = 0; i < geo->a_channels; i++) {
6141 if ((priv->status & STATUS_ASSOCIATED) &&
6142 geo->a[i].channel == priv->channel)
6143 continue;
6144 channel_index++;
6145 scan->channels_list[channel_index] = geo->a[i].channel;
6146 ipw_set_scan_type(scan, channel_index,
6147 geo->a[i].
6148 flags & IEEE80211_CH_PASSIVE_ONLY ?
6149 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6150 scan_type);
6151 }
6152
6153 if (start != channel_index) {
6154 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6155 (channel_index - start);
6156 channel_index++;
6157 }
6158 }
6159
6160 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6161 int start = channel_index;
6162 if (priv->config & CFG_SPEED_SCAN) {
6163 int index;
6164 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6165 /* nop out the list */
6166 [0] = 0
6167 };
6168
6169 u8 channel;
6170 while (channel_index < IPW_SCAN_CHANNELS) {
6171 channel =
6172 priv->speed_scan[priv->speed_scan_pos];
6173 if (channel == 0) {
6174 priv->speed_scan_pos = 0;
6175 channel = priv->speed_scan[0];
6176 }
6177 if ((priv->status & STATUS_ASSOCIATED) &&
6178 channel == priv->channel) {
6179 priv->speed_scan_pos++;
6180 continue;
6181 }
6182
6183 /* If this channel has already been
6184 * added to this scan, break out of the
6185 * loop; it will be the first channel
6186 * in the next scan.
6187 */
6188 if (channels[channel - 1] != 0)
6189 break;
6190
6191 channels[channel - 1] = 1;
6192 priv->speed_scan_pos++;
6193 channel_index++;
6194 scan->channels_list[channel_index] = channel;
6195 index =
6196 ieee80211_channel_to_index(priv->ieee, channel);
6197 ipw_set_scan_type(scan, channel_index,
6198 geo->bg[index].
6199 flags &
6200 IEEE80211_CH_PASSIVE_ONLY ?
6201 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6202 : scan_type);
6203 }
6204 } else {
6205 for (i = 0; i < geo->bg_channels; i++) {
6206 if ((priv->status & STATUS_ASSOCIATED) &&
6207 geo->bg[i].channel == priv->channel)
6208 continue;
6209 channel_index++;
6210 scan->channels_list[channel_index] =
6211 geo->bg[i].channel;
6212 ipw_set_scan_type(scan, channel_index,
6213 geo->bg[i].
6214 flags &
6215 IEEE80211_CH_PASSIVE_ONLY ?
6216 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6217 : scan_type);
6218 }
6219 }
6220
6221 if (start != channel_index) {
6222 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6223 (channel_index - start);
6224 }
6225 }
6226 }
6227
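/*
 * Build and send a scan request of the given @type (active or passive),
 * unless a scan is already in progress, an abort is pending, or the radio
 * is disabled by RF kill, in which case the scan is left pending.
 */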
6228 static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
6229 {
6230 struct ipw_scan_request_ext scan;
6231 int err = 0, scan_type;
6232
6233 if (!(priv->status & STATUS_INIT) ||
6234 (priv->status & STATUS_EXIT_PENDING))
6235 return 0;
6236
6237 mutex_lock(&priv->mutex);
6238
6239 if (priv->status & STATUS_SCANNING) {
6240 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6241 priv->status |= STATUS_SCAN_PENDING;
6242 goto done;
6243 }
6244
6245 if (!(priv->status & STATUS_SCAN_FORCED) &&
6246 priv->status & STATUS_SCAN_ABORTING) {
6247 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6248 priv->status |= STATUS_SCAN_PENDING;
6249 goto done;
6250 }
6251
6252 if (priv->status & STATUS_RF_KILL_MASK) {
6253 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6254 priv->status |= STATUS_SCAN_PENDING;
6255 goto done;
6256 }
6257
6258 memset(&scan, 0, sizeof(scan));
6259 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6260
6261 if (type == IW_SCAN_TYPE_PASSIVE) {
6262 IPW_DEBUG_WX("use passive scanning\n");
6263 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6264 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6265 cpu_to_le16(120);
6266 ipw_add_scan_channels(priv, &scan, scan_type);
6267 goto send_request;
6268 }
6269
6270 /* Use active scan by default. */
6271 if (priv->config & CFG_SPEED_SCAN)
6272 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6273 cpu_to_le16(30);
6274 else
6275 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6276 cpu_to_le16(20);
6277
6278 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6279 cpu_to_le16(20);
6280
6281 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6282
6283 #ifdef CONFIG_IPW2200_MONITOR
6284 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6285 u8 channel;
6286 u8 band = 0;
6287
6288 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6289 case IEEE80211_52GHZ_BAND:
6290 band = (u8) (IPW_A_MODE << 6) | 1;
6291 channel = priv->channel;
6292 break;
6293
6294 case IEEE80211_24GHZ_BAND:
6295 band = (u8) (IPW_B_MODE << 6) | 1;
6296 channel = priv->channel;
6297 break;
6298
6299 default:
6300 band = (u8) (IPW_B_MODE << 6) | 1;
6301 channel = 9;
6302 break;
6303 }
6304
6305 scan.channels_list[0] = band;
6306 scan.channels_list[1] = channel;
6307 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6308
6309 /* NOTE: The card will sit on this channel for this time
6310 * period. Scan aborts are timing sensitive and frequently
6311 * result in firmware restarts. As such, it is best to
6312 * set a small dwell_time here and just keep re-issuing
6313 * scans. Otherwise fast channel hopping will not actually
6314 * hop channels.
6315 *
6316 * TODO: Move SPEED SCAN support to all modes and bands */
6317 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6318 cpu_to_le16(2000);
6319 } else {
6320 #endif /* CONFIG_IPW2200_MONITOR */
6321 /* If we are roaming, then make this a directed scan for the
6322 * current network. Otherwise, ensure that every other scan
6323 * is a fast channel hop scan */
6324 if ((priv->status & STATUS_ROAMING)
6325 || (!(priv->status & STATUS_ASSOCIATED)
6326 && (priv->config & CFG_STATIC_ESSID)
6327 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6328 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6329 if (err) {
6330 IPW_DEBUG_HC("Attempt to send SSID command "
6331 "failed.\n");
6332 goto done;
6333 }
6334
6335 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6336 } else
6337 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6338
6339 ipw_add_scan_channels(priv, &scan, scan_type);
6340 #ifdef CONFIG_IPW2200_MONITOR
6341 }
6342 #endif
6343
6344 send_request:
6345 err = ipw_send_scan_request_ext(priv, &scan);
6346 if (err) {
6347 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6348 goto done;
6349 }
6350
6351 priv->status |= STATUS_SCANNING;
6352 priv->status &= ~STATUS_SCAN_PENDING;
6353 queue_delayed_work(priv->workqueue, &priv->scan_check,
6354 IPW_SCAN_CHECK_WATCHDOG);
6355 done:
6356 mutex_unlock(&priv->mutex);
6357 return err;
6358 }
6359
6360 static void ipw_request_passive_scan(struct work_struct *work)
6361 {
6362 struct ipw_priv *priv =
6363 container_of(work, struct ipw_priv, request_passive_scan);
6364 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6365 }
6366
6367 static void ipw_request_scan(struct work_struct *work)
6368 {
6369 struct ipw_priv *priv =
6370 container_of(work, struct ipw_priv, request_scan.work);
6371 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6372 }
6373
6374 static void ipw_bg_abort_scan(struct work_struct *work)
6375 {
6376 struct ipw_priv *priv =
6377 container_of(work, struct ipw_priv, abort_scan);
6378 mutex_lock(&priv->mutex);
6379 ipw_abort_scan(priv);
6380 mutex_unlock(&priv->mutex);
6381 }
6382
6383 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6384 {
6385 /* This is called when wpa_supplicant loads and closes the driver
6386 * interface. */
6387 priv->ieee->wpa_enabled = value;
6388 return 0;
6389 }
6390
6391 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6392 {
6393 struct ieee80211_device *ieee = priv->ieee;
6394 struct ieee80211_security sec = {
6395 .flags = SEC_AUTH_MODE,
6396 };
6397 int ret = 0;
6398
6399 if (value & IW_AUTH_ALG_SHARED_KEY) {
6400 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6401 ieee->open_wep = 0;
6402 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6403 sec.auth_mode = WLAN_AUTH_OPEN;
6404 ieee->open_wep = 1;
6405 } else if (value & IW_AUTH_ALG_LEAP) {
6406 sec.auth_mode = WLAN_AUTH_LEAP;
6407 ieee->open_wep = 1;
6408 } else
6409 return -EINVAL;
6410
6411 if (ieee->set_security)
6412 ieee->set_security(ieee->dev, &sec);
6413 else
6414 ret = -EOPNOTSUPP;
6415
6416 return ret;
6417 }
6418
6419 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6420 int wpa_ie_len)
6421 {
6422 /* make sure WPA is enabled */
6423 ipw_wpa_enable(priv, 1);
6424 }
6425
6426 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6427 char *capabilities, int length)
6428 {
6429 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6430
6431 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6432 capabilities);
6433 }
6434
6435 /*
6436 * WE-18 support
6437 */
6438
6439 /* SIOCSIWGENIE */
6440 static int ipw_wx_set_genie(struct net_device *dev,
6441 struct iw_request_info *info,
6442 union iwreq_data *wrqu, char *extra)
6443 {
6444 struct ipw_priv *priv = ieee80211_priv(dev);
6445 struct ieee80211_device *ieee = priv->ieee;
6446 u8 *buf;
6447 int err = 0;
6448
6449 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6450 (wrqu->data.length && extra == NULL))
6451 return -EINVAL;
6452
6453 if (wrqu->data.length) {
6454 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6455 if (buf == NULL) {
6456 err = -ENOMEM;
6457 goto out;
6458 }
6459
6460 memcpy(buf, extra, wrqu->data.length);
6461 kfree(ieee->wpa_ie);
6462 ieee->wpa_ie = buf;
6463 ieee->wpa_ie_len = wrqu->data.length;
6464 } else {
6465 kfree(ieee->wpa_ie);
6466 ieee->wpa_ie = NULL;
6467 ieee->wpa_ie_len = 0;
6468 }
6469
6470 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6471 out:
6472 return err;
6473 }
6474
6475 /* SIOCGIWGENIE */
6476 static int ipw_wx_get_genie(struct net_device *dev,
6477 struct iw_request_info *info,
6478 union iwreq_data *wrqu, char *extra)
6479 {
6480 struct ipw_priv *priv = ieee80211_priv(dev);
6481 struct ieee80211_device *ieee = priv->ieee;
6482 int err = 0;
6483
6484 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6485 wrqu->data.length = 0;
6486 goto out;
6487 }
6488
6489 if (wrqu->data.length < ieee->wpa_ie_len) {
6490 err = -E2BIG;
6491 goto out;
6492 }
6493
6494 wrqu->data.length = ieee->wpa_ie_len;
6495 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6496
6497 out:
6498 return err;
6499 }
6500
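/* Map a WEXT IW_AUTH_CIPHER_* value to the driver's SEC_LEVEL_*. */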
6501 static int wext_cipher2level(int cipher)
6502 {
6503 switch (cipher) {
6504 case IW_AUTH_CIPHER_NONE:
6505 return SEC_LEVEL_0;
6506 case IW_AUTH_CIPHER_WEP40:
6507 case IW_AUTH_CIPHER_WEP104:
6508 return SEC_LEVEL_1;
6509 case IW_AUTH_CIPHER_TKIP:
6510 return SEC_LEVEL_2;
6511 case IW_AUTH_CIPHER_CCMP:
6512 return SEC_LEVEL_3;
6513 default:
6514 return -1;
6515 }
6516 }
6517
6518 /* SIOCSIWAUTH */
6519 static int ipw_wx_set_auth(struct net_device *dev,
6520 struct iw_request_info *info,
6521 union iwreq_data *wrqu, char *extra)
6522 {
6523 struct ipw_priv *priv = ieee80211_priv(dev);
6524 struct ieee80211_device *ieee = priv->ieee;
6525 struct iw_param *param = &wrqu->param;
6526 struct ieee80211_crypt_data *crypt;
6527 unsigned long flags;
6528 int ret = 0;
6529
6530 switch (param->flags & IW_AUTH_INDEX) {
6531 case IW_AUTH_WPA_VERSION:
6532 break;
6533 case IW_AUTH_CIPHER_PAIRWISE:
6534 ipw_set_hw_decrypt_unicast(priv,
6535 wext_cipher2level(param->value));
6536 break;
6537 case IW_AUTH_CIPHER_GROUP:
6538 ipw_set_hw_decrypt_multicast(priv,
6539 wext_cipher2level(param->value));
6540 break;
6541 case IW_AUTH_KEY_MGMT:
6542 /*
6543 * ipw2200 does not use these parameters
6544 */
6545 break;
6546
6547 case IW_AUTH_TKIP_COUNTERMEASURES:
6548 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6549 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6550 break;
6551
6552 flags = crypt->ops->get_flags(crypt->priv);
6553
6554 if (param->value)
6555 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6556 else
6557 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6558
6559 crypt->ops->set_flags(flags, crypt->priv);
6560
6561 break;
6562
6563 case IW_AUTH_DROP_UNENCRYPTED:{
6564 /* HACK:
6565 *
6566 * wpa_supplicant calls set_wpa_enabled when the driver
6567 * is loaded and unloaded, regardless of whether WPA is being
6568 * used. No other calls are made before association that could
6569 * be used to determine whether encryption will be used.
6570 * If encryption is not being
6571 * used, drop_unencrypted is set to false, else true -- we
6572 * can use this to determine if the CAP_PRIVACY_ON bit should
6573 * be set.
6574 */
6575 struct ieee80211_security sec = {
6576 .flags = SEC_ENABLED,
6577 .enabled = param->value,
6578 };
6579 priv->ieee->drop_unencrypted = param->value;
6580 /* We only change SEC_LEVEL for open mode. Others
6581 * are set by ipw_wpa_set_encryption.
6582 */
6583 if (!param->value) {
6584 sec.flags |= SEC_LEVEL;
6585 sec.level = SEC_LEVEL_0;
6586 } else {
6587 sec.flags |= SEC_LEVEL;
6588 sec.level = SEC_LEVEL_1;
6589 }
6590 if (priv->ieee->set_security)
6591 priv->ieee->set_security(priv->ieee->dev, &sec);
6592 break;
6593 }
6594
6595 case IW_AUTH_80211_AUTH_ALG:
6596 ret = ipw_wpa_set_auth_algs(priv, param->value);
6597 break;
6598
6599 case IW_AUTH_WPA_ENABLED:
6600 ret = ipw_wpa_enable(priv, param->value);
6601 ipw_disassociate(priv);
6602 break;
6603
6604 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6605 ieee->ieee802_1x = param->value;
6606 break;
6607
6608 case IW_AUTH_PRIVACY_INVOKED:
6609 ieee->privacy_invoked = param->value;
6610 break;
6611
6612 default:
6613 return -EOPNOTSUPP;
6614 }
6615 return ret;
6616 }
6617
6618 /* SIOCGIWAUTH */
6619 static int ipw_wx_get_auth(struct net_device *dev,
6620 struct iw_request_info *info,
6621 union iwreq_data *wrqu, char *extra)
6622 {
6623 struct ipw_priv *priv = ieee80211_priv(dev);
6624 struct ieee80211_device *ieee = priv->ieee;
6625 struct ieee80211_crypt_data *crypt;
6626 struct iw_param *param = &wrqu->param;
6627 int ret = 0;
6628
6629 switch (param->flags & IW_AUTH_INDEX) {
6630 case IW_AUTH_WPA_VERSION:
6631 case IW_AUTH_CIPHER_PAIRWISE:
6632 case IW_AUTH_CIPHER_GROUP:
6633 case IW_AUTH_KEY_MGMT:
6634 /*
6635 * wpa_supplicant will control these internally
6636 */
6637 ret = -EOPNOTSUPP;
6638 break;
6639
6640 case IW_AUTH_TKIP_COUNTERMEASURES:
6641 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6642 if (!crypt || !crypt->ops->get_flags)
6643 break;
6644
6645 param->value = (crypt->ops->get_flags(crypt->priv) &
6646 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6647
6648 break;
6649
6650 case IW_AUTH_DROP_UNENCRYPTED:
6651 param->value = ieee->drop_unencrypted;
6652 break;
6653
6654 case IW_AUTH_80211_AUTH_ALG:
6655 param->value = ieee->sec.auth_mode;
6656 break;
6657
6658 case IW_AUTH_WPA_ENABLED:
6659 param->value = ieee->wpa_enabled;
6660 break;
6661
6662 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6663 param->value = ieee->ieee802_1x;
6664 break;
6665
6666 case IW_AUTH_ROAMING_CONTROL:
6667 case IW_AUTH_PRIVACY_INVOKED:
6668 param->value = ieee->privacy_invoked;
6669 break;
6670
6671 default:
6672 return -EOPNOTSUPP;
6673 }
6674 return 0;
6675 }
6676
6677 /* SIOCSIWENCODEEXT */
6678 static int ipw_wx_set_encodeext(struct net_device *dev,
6679 struct iw_request_info *info,
6680 union iwreq_data *wrqu, char *extra)
6681 {
6682 struct ipw_priv *priv = ieee80211_priv(dev);
6683 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6684
6685 if (hwcrypto) {
6686 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6687 /* IPW HW can't build TKIP MIC,
6688 host decryption still needed */
6689 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6690 priv->ieee->host_mc_decrypt = 1;
6691 else {
6692 priv->ieee->host_encrypt = 0;
6693 priv->ieee->host_encrypt_msdu = 1;
6694 priv->ieee->host_decrypt = 1;
6695 }
6696 } else {
6697 priv->ieee->host_encrypt = 0;
6698 priv->ieee->host_encrypt_msdu = 0;
6699 priv->ieee->host_decrypt = 0;
6700 priv->ieee->host_mc_decrypt = 0;
6701 }
6702 }
6703
6704 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6705 }
6706
6707 /* SIOCGIWENCODEEXT */
6708 static int ipw_wx_get_encodeext(struct net_device *dev,
6709 struct iw_request_info *info,
6710 union iwreq_data *wrqu, char *extra)
6711 {
6712 struct ipw_priv *priv = ieee80211_priv(dev);
6713 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6714 }
6715
6716 /* SIOCSIWMLME */
6717 static int ipw_wx_set_mlme(struct net_device *dev,
6718 struct iw_request_info *info,
6719 union iwreq_data *wrqu, char *extra)
6720 {
6721 struct ipw_priv *priv = ieee80211_priv(dev);
6722 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6723 u16 reason;
6724
6725 reason = cpu_to_le16(mlme->reason_code);
6726
6727 switch (mlme->cmd) {
6728 case IW_MLME_DEAUTH:
6729 /* silently ignore */
6730 break;
6731
6732 case IW_MLME_DISASSOC:
6733 ipw_disassociate(priv);
6734 break;
6735
6736 default:
6737 return -EOPNOTSUPP;
6738 }
6739 return 0;
6740 }
6741
6742 #ifdef CONFIG_IPW2200_QOS
6743
6744 /* QoS */
6745 /*
6746 * get the modulation type of the current network or
6747 * the card's current mode
6748 */
6749 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6750 {
6751 u8 mode = 0;
6752
6753 if (priv->status & STATUS_ASSOCIATED) {
6754 unsigned long flags;
6755
6756 spin_lock_irqsave(&priv->ieee->lock, flags);
6757 mode = priv->assoc_network->mode;
6758 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6759 } else {
6760 mode = priv->ieee->mode;
6761 }
6762 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6763 return mode;
6764 }
6765
6766 /*
6767 * Handle management frame beacon and probe response
6768 */
6769 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6770 int active_network,
6771 struct ieee80211_network *network)
6772 {
6773 u32 size = sizeof(struct ieee80211_qos_parameters);
6774
6775 if (network->capability & WLAN_CAPABILITY_IBSS)
6776 network->qos_data.active = network->qos_data.supported;
6777
6778 if (network->flags & NETWORK_HAS_QOS_MASK) {
6779 if (active_network &&
6780 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6781 network->qos_data.active = network->qos_data.supported;
6782
6783 if ((network->qos_data.active == 1) && (active_network == 1) &&
6784 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6785 (network->qos_data.old_param_count !=
6786 network->qos_data.param_count)) {
6787 network->qos_data.old_param_count =
6788 network->qos_data.param_count;
6789 schedule_work(&priv->qos_activate);
6790 IPW_DEBUG_QOS("QoS parameters change call "
6791 "qos_activate\n");
6792 }
6793 } else {
6794 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6795 memcpy(&network->qos_data.parameters,
6796 &def_parameters_CCK, size);
6797 else
6798 memcpy(&network->qos_data.parameters,
6799 &def_parameters_OFDM, size);
6800
6801 if ((network->qos_data.active == 1) && (active_network == 1)) {
6802 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6803 schedule_work(&priv->qos_activate);
6804 }
6805
6806 network->qos_data.active = 0;
6807 network->qos_data.supported = 0;
6808 }
6809 if ((priv->status & STATUS_ASSOCIATED) &&
6810 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6811 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6812 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6813 !(network->flags & NETWORK_EMPTY_ESSID))
6814 if ((network->ssid_len ==
6815 priv->assoc_network->ssid_len) &&
6816 !memcmp(network->ssid,
6817 priv->assoc_network->ssid,
6818 network->ssid_len)) {
6819 queue_work(priv->workqueue,
6820 &priv->merge_networks);
6821 }
6822 }
6823
6824 return 0;
6825 }
6826
6827 /*
6828 * This function sets up the firmware to support QoS. It sends
6829 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6830 */
6831 static int ipw_qos_activate(struct ipw_priv *priv,
6832 struct ieee80211_qos_data *qos_network_data)
6833 {
6834 int err;
6835 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6836 struct ieee80211_qos_parameters *active_one = NULL;
6837 u32 size = sizeof(struct ieee80211_qos_parameters);
6838 u32 burst_duration;
6839 int i;
6840 u8 type;
6841
6842 type = ipw_qos_current_mode(priv);
6843
6844 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6845 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6846 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6847 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6848
6849 if (qos_network_data == NULL) {
6850 if (type == IEEE_B) {
6851 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6852 active_one = &def_parameters_CCK;
6853 } else
6854 active_one = &def_parameters_OFDM;
6855
6856 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6857 burst_duration = ipw_qos_get_burst_duration(priv);
6858 for (i = 0; i < QOS_QUEUE_NUM; i++)
6859 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6860 (u16)burst_duration;
6861 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6862 if (type == IEEE_B) {
6863 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6864 type);
6865 if (priv->qos_data.qos_enable == 0)
6866 active_one = &def_parameters_CCK;
6867 else
6868 active_one = priv->qos_data.def_qos_parm_CCK;
6869 } else {
6870 if (priv->qos_data.qos_enable == 0)
6871 active_one = &def_parameters_OFDM;
6872 else
6873 active_one = priv->qos_data.def_qos_parm_OFDM;
6874 }
6875 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6876 } else {
6877 unsigned long flags;
6878 int active;
6879
6880 spin_lock_irqsave(&priv->ieee->lock, flags);
6881 active_one = &(qos_network_data->parameters);
6882 qos_network_data->old_param_count =
6883 qos_network_data->param_count;
6884 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6885 active = qos_network_data->supported;
6886 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6887
6888 if (active == 0) {
6889 burst_duration = ipw_qos_get_burst_duration(priv);
6890 for (i = 0; i < QOS_QUEUE_NUM; i++)
6891 qos_parameters[QOS_PARAM_SET_ACTIVE].
6892 tx_op_limit[i] = (u16)burst_duration;
6893 }
6894 }
6895
6896 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6897 for (i = 0; i < 3; i++) {
6898 int j;
6899 for (j = 0; j < QOS_QUEUE_NUM; j++) {
6900 qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
6901 qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
6902 qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
6903 }
6904 }
6905
6906 err = ipw_send_qos_params_command(priv,
6907 (struct ieee80211_qos_parameters *)
6908 &(qos_parameters[0]));
6909 if (err)
6910 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6911
6912 return err;
6913 }
6914
6915 /*
6916 * send IPW_CMD_WME_INFO to the firmware
6917 */
6918 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6919 {
6920 int ret = 0;
6921 struct ieee80211_qos_information_element qos_info;
6922
6923 if (priv == NULL)
6924 return -1;
6925
6926 qos_info.elementID = QOS_ELEMENT_ID;
6927 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6928
6929 qos_info.version = QOS_VERSION_1;
6930 qos_info.ac_info = 0;
6931
6932 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6933 qos_info.qui_type = QOS_OUI_TYPE;
6934 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6935
6936 ret = ipw_send_qos_info_command(priv, &qos_info);
6937 if (ret != 0) {
6938 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6939 }
6940 return ret;
6941 }
6942
6943 /*
6944 * Set up the QoS parameters for the association request
6945 */
6946 static int ipw_qos_association(struct ipw_priv *priv,
6947 struct ieee80211_network *network)
6948 {
6949 int err = 0;
6950 struct ieee80211_qos_data *qos_data = NULL;
6951 struct ieee80211_qos_data ibss_data = {
6952 .supported = 1,
6953 .active = 1,
6954 };
6955
6956 switch (priv->ieee->iw_mode) {
6957 case IW_MODE_ADHOC:
6958 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6959
6960 qos_data = &ibss_data;
6961 break;
6962
6963 case IW_MODE_INFRA:
6964 qos_data = &network->qos_data;
6965 break;
6966
6967 default:
6968 BUG();
6969 break;
6970 }
6971
6972 err = ipw_qos_activate(priv, qos_data);
6973 if (err) {
6974 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6975 return err;
6976 }
6977
6978 if (priv->qos_data.qos_enable && qos_data->supported) {
6979 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6980 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6981 return ipw_qos_set_info_element(priv);
6982 }
6983
6984 return 0;
6985 }
6986
6987 /*
6988 * Handle beacon responses. If the QoS settings received from the
6989 * network differ from the settings recorded at association time,
6990 * adjust the QoS settings
6991 */
6992 static int ipw_qos_association_resp(struct ipw_priv *priv,
6993 struct ieee80211_network *network)
6994 {
6995 int ret = 0;
6996 unsigned long flags;
6997 u32 size = sizeof(struct ieee80211_qos_parameters);
6998 int set_qos_param = 0;
6999
7000 if ((priv == NULL) || (network == NULL) ||
7001 (priv->assoc_network == NULL))
7002 return ret;
7003
7004 if (!(priv->status & STATUS_ASSOCIATED))
7005 return ret;
7006
7007 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7008 return ret;
7009
7010 spin_lock_irqsave(&priv->ieee->lock, flags);
7011 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7012 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7013 sizeof(struct ieee80211_qos_data));
7014 priv->assoc_network->qos_data.active = 1;
7015 if ((network->qos_data.old_param_count !=
7016 network->qos_data.param_count)) {
7017 set_qos_param = 1;
7018 network->qos_data.old_param_count =
7019 network->qos_data.param_count;
7020 }
7021
7022 } else {
7023 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7024 memcpy(&priv->assoc_network->qos_data.parameters,
7025 &def_parameters_CCK, size);
7026 else
7027 memcpy(&priv->assoc_network->qos_data.parameters,
7028 &def_parameters_OFDM, size);
7029 priv->assoc_network->qos_data.active = 0;
7030 priv->assoc_network->qos_data.supported = 0;
7031 set_qos_param = 1;
7032 }
7033
7034 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7035
7036 if (set_qos_param == 1)
7037 schedule_work(&priv->qos_activate);
7038
7039 return ret;
7040 }
7041
7042 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7043 {
7044 u32 ret = 0;
7045
7046 if ((priv == NULL))
7047 return 0;
7048
7049 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7050 ret = priv->qos_data.burst_duration_CCK;
7051 else
7052 ret = priv->qos_data.burst_duration_OFDM;
7053
7054 return ret;
7055 }
7056
7057 /*
7058 * Initialize the global QoS settings
7059 */
7060 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7061 int burst_enable, u32 burst_duration_CCK,
7062 u32 burst_duration_OFDM)
7063 {
7064 priv->qos_data.qos_enable = enable;
7065
7066 if (priv->qos_data.qos_enable) {
7067 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7068 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7069 IPW_DEBUG_QOS("QoS is enabled\n");
7070 } else {
7071 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7072 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7073 IPW_DEBUG_QOS("QoS is not enabled\n");
7074 }
7075
7076 priv->qos_data.burst_enable = burst_enable;
7077
7078 if (burst_enable) {
7079 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7080 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7081 } else {
7082 priv->qos_data.burst_duration_CCK = 0;
7083 priv->qos_data.burst_duration_OFDM = 0;
7084 }
7085 }
7086
7087 /*
7088 * map the packet priority to the right TX Queue
7089 */
7090 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7091 {
7092 if (priority > 7 || !priv->qos_data.qos_enable)
7093 priority = 0;
7094
7095 return from_priority_to_tx_queue[priority] - 1;
7096 }
7097
7098 static int ipw_is_qos_active(struct net_device *dev,
7099 struct sk_buff *skb)
7100 {
7101 struct ipw_priv *priv = ieee80211_priv(dev);
7102 struct ieee80211_qos_data *qos_data = NULL;
7103 int active, supported;
7104 u8 *daddr = skb->data + ETH_ALEN;
7105 int unicast = !is_multicast_ether_addr(daddr);
7106
7107 if (!(priv->status & STATUS_ASSOCIATED))
7108 return 0;
7109
7110 qos_data = &priv->assoc_network->qos_data;
7111
7112 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7113 if (unicast == 0)
7114 qos_data->active = 0;
7115 else
7116 qos_data->active = qos_data->supported;
7117 }
7118 active = qos_data->active;
7119 supported = qos_data->supported;
7120 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7121 "unicast %d\n",
7122 priv->qos_data.qos_enable, active, supported, unicast);
7123 if (active && priv->qos_data.qos_enable)
7124 return 1;
7125
7126 return 0;
7127
7128 }
7129 /*
7130 * add QoS parameter to the TX command
7131 */
7132 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7133 u16 priority,
7134 struct tfd_data *tfd)
7135 {
7136 int tx_queue_id = 0;
7137
7138
7139 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7140 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7141
7142 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7143 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7144 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7145 }
7146 return 0;
7147 }
7148
7149 /*
7150 * background support to run QoS activate functionality
7151 */
7152 static void ipw_bg_qos_activate(struct work_struct *work)
7153 {
7154 struct ipw_priv *priv =
7155 container_of(work, struct ipw_priv, qos_activate);
7156
7157 if (priv == NULL)
7158 return;
7159
7160 mutex_lock(&priv->mutex);
7161
7162 if (priv->status & STATUS_ASSOCIATED)
7163 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7164
7165 mutex_unlock(&priv->mutex);
7166 }
7167
7168 static int ipw_handle_probe_response(struct net_device *dev,
7169 struct ieee80211_probe_response *resp,
7170 struct ieee80211_network *network)
7171 {
7172 struct ipw_priv *priv = ieee80211_priv(dev);
7173 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7174 (network == priv->assoc_network));
7175
7176 ipw_qos_handle_probe_response(priv, active_network, network);
7177
7178 return 0;
7179 }
7180
7181 static int ipw_handle_beacon(struct net_device *dev,
7182 struct ieee80211_beacon *resp,
7183 struct ieee80211_network *network)
7184 {
7185 struct ipw_priv *priv = ieee80211_priv(dev);
7186 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7187 (network == priv->assoc_network));
7188
7189 ipw_qos_handle_probe_response(priv, active_network, network);
7190
7191 return 0;
7192 }
7193
7194 static int ipw_handle_assoc_response(struct net_device *dev,
7195 struct ieee80211_assoc_response *resp,
7196 struct ieee80211_network *network)
7197 {
7198 struct ipw_priv *priv = ieee80211_priv(dev);
7199 ipw_qos_association_resp(priv, network);
7200 return 0;
7201 }
7202
7203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7204 *qos_param)
7205 {
7206 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7207 sizeof(*qos_param) * 3, qos_param);
7208 }
7209
7210 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7211 *qos_param)
7212 {
7213 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7214 qos_param);
7215 }
7216
7217 #endif /* CONFIG_IPW2200_QOS */
7218
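/*
 * Program the firmware with everything needed to join @network (SSID,
 * rates, system config, sensitivity) and issue the associate command.
 */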
7219 static int ipw_associate_network(struct ipw_priv *priv,
7220 struct ieee80211_network *network,
7221 struct ipw_supported_rates *rates, int roaming)
7222 {
7223 int err;
7224
7225 if (priv->config & CFG_FIXED_RATE)
7226 ipw_set_fixed_rate(priv, network->mode);
7227
7228 if (!(priv->config & CFG_STATIC_ESSID)) {
7229 priv->essid_len = min(network->ssid_len,
7230 (u8) IW_ESSID_MAX_SIZE);
7231 memcpy(priv->essid, network->ssid, priv->essid_len);
7232 }
7233
7234 network->last_associate = jiffies;
7235
7236 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7237 priv->assoc_request.channel = network->channel;
7238 priv->assoc_request.auth_key = 0;
7239
7240 if ((priv->capability & CAP_PRIVACY_ON) &&
7241 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7242 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7243 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7244
7245 if (priv->ieee->sec.level == SEC_LEVEL_1)
7246 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7247
7248 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7249 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7250 priv->assoc_request.auth_type = AUTH_LEAP;
7251 else
7252 priv->assoc_request.auth_type = AUTH_OPEN;
7253
7254 if (priv->ieee->wpa_ie_len) {
7255 priv->assoc_request.policy_support = 0x02; /* RSN active */
7256 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7257 priv->ieee->wpa_ie_len);
7258 }
7259
7260 /*
7261 * It is valid for our ieee device to support multiple modes, but
7262 * when it comes to associating to a given network we have to choose
7263 * just one mode.
7264 */
7265 if (network->mode & priv->ieee->mode & IEEE_A)
7266 priv->assoc_request.ieee_mode = IPW_A_MODE;
7267 else if (network->mode & priv->ieee->mode & IEEE_G)
7268 priv->assoc_request.ieee_mode = IPW_G_MODE;
7269 else if (network->mode & priv->ieee->mode & IEEE_B)
7270 priv->assoc_request.ieee_mode = IPW_B_MODE;
7271
7272 priv->assoc_request.capability = network->capability;
7273 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7274 && !(priv->config & CFG_PREAMBLE_LONG)) {
7275 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7276 } else {
7277 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7278
7279 /* Clear the short preamble if we won't be supporting it */
7280 priv->assoc_request.capability &=
7281 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7282 }
7283
7284 /* Clear capability bits that aren't used in Ad Hoc */
7285 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7286 priv->assoc_request.capability &=
7287 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7288
7289 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7290 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7291 roaming ? "Rea" : "A",
7292 escape_essid(priv->essid, priv->essid_len),
7293 network->channel,
7294 ipw_modes[priv->assoc_request.ieee_mode],
7295 rates->num_rates,
7296 (priv->assoc_request.preamble_length ==
7297 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7298 network->capability &
7299 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7300 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7301 priv->capability & CAP_PRIVACY_ON ?
7302 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7303 "(open)") : "",
7304 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7305 priv->capability & CAP_PRIVACY_ON ?
7306 '1' + priv->ieee->sec.active_key : '.',
7307 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7308
7309 priv->assoc_request.beacon_interval = network->beacon_interval;
7310 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7311 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7312 priv->assoc_request.assoc_type = HC_IBSS_START;
7313 priv->assoc_request.assoc_tsf_msw = 0;
7314 priv->assoc_request.assoc_tsf_lsw = 0;
7315 } else {
7316 if (unlikely(roaming))
7317 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7318 else
7319 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7320 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7321 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7322 }
7323
7324 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7325
7326 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7327 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7328 priv->assoc_request.atim_window = network->atim_window;
7329 } else {
7330 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7331 priv->assoc_request.atim_window = 0;
7332 }
7333
7334 priv->assoc_request.listen_interval = network->listen_interval;
7335
7336 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7337 if (err) {
7338 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7339 return err;
7340 }
7341
7342 rates->ieee_mode = priv->assoc_request.ieee_mode;
7343 rates->purpose = IPW_RATE_CONNECT;
7344 ipw_send_supported_rates(priv, rates);
7345
7346 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7347 priv->sys_config.dot11g_auto_detection = 1;
7348 else
7349 priv->sys_config.dot11g_auto_detection = 0;
7350
7351 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7352 priv->sys_config.answer_broadcast_ssid_probe = 1;
7353 else
7354 priv->sys_config.answer_broadcast_ssid_probe = 0;
7355
7356 err = ipw_send_system_config(priv);
7357 if (err) {
7358 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7359 return err;
7360 }
7361
7362 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7363 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7364 if (err) {
7365 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7366 return err;
7367 }
7368
7369 /*
7370 * If preemption is enabled, it is possible for the association
7371 * to complete before we return from ipw_send_associate. Therefore
7372 * we have to be sure to update our private data first.
7373 */
7374 priv->channel = network->channel;
7375 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7376 priv->status |= STATUS_ASSOCIATING;
7377 priv->status &= ~STATUS_SECURITY_UPDATED;
7378
7379 priv->assoc_network = network;
7380
7381 #ifdef CONFIG_IPW2200_QOS
7382 ipw_qos_association(priv, network);
7383 #endif
7384
7385 err = ipw_send_associate(priv, &priv->assoc_request);
7386 if (err) {
7387 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7388 return err;
7389 }
7390
7391 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7392 escape_essid(priv->essid, priv->essid_len),
7393 MAC_ARG(priv->bssid));
7394
7395 return 0;
7396 }
7397
7398 static void ipw_roam(void *data)
7399 {
7400 struct ipw_priv *priv = data;
7401 struct ieee80211_network *network = NULL;
7402 struct ipw_network_match match = {
7403 .network = priv->assoc_network
7404 };
7405
7406 /* The roaming process is as follows:
7407 *
7408 * 1. Missed beacon threshold triggers the roaming process by
7409 * setting the status ROAM bit and requesting a scan.
7410 * 2. When the scan completes, it schedules the ROAM work
7411 * 3. The ROAM work looks at all of the known networks for one that
7412 * is a better network than the currently associated one. If none is
7413 * found, the ROAM process is over (ROAM bit cleared)
7414 * 4. If a better network is found, a disassociation request is
7415 * sent.
7416 * 5. When the disassociation completes, the roam work is again
7417 * scheduled. The second time through, the driver is no longer
7418 * associated, and the newly selected network is sent an
7419 * association request.
7420 * 6. At this point, the roaming process is complete and the ROAM
7421 * status bit is cleared.
7422 */
7423
7424 /* If we are no longer associated, and the roaming bit is no longer
7425 * set, then we are not actively roaming, so just return */
7426 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7427 return;
7428
7429 if (priv->status & STATUS_ASSOCIATED) {
7430 /* First pass through ROAM process -- look for a better
7431 * network */
7432 unsigned long flags;
7433 u8 rssi = priv->assoc_network->stats.rssi;
7434 priv->assoc_network->stats.rssi = -128;
7435 spin_lock_irqsave(&priv->ieee->lock, flags);
7436 list_for_each_entry(network, &priv->ieee->network_list, list) {
7437 if (network != priv->assoc_network)
7438 ipw_best_network(priv, &match, network, 1);
7439 }
7440 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7441 priv->assoc_network->stats.rssi = rssi;
7442
7443 if (match.network == priv->assoc_network) {
7444 IPW_DEBUG_ASSOC("No better APs in this network to "
7445 "roam to.\n");
7446 priv->status &= ~STATUS_ROAMING;
7447 ipw_debug_config(priv);
7448 return;
7449 }
7450
7451 ipw_send_disassociate(priv, 1);
7452 priv->assoc_network = match.network;
7453
7454 return;
7455 }
7456
7457 /* Second pass through ROAM process -- request association */
7458 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7459 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7460 priv->status &= ~STATUS_ROAMING;
7461 }
7462
7463 static void ipw_bg_roam(struct work_struct *work)
7464 {
7465 struct ipw_priv *priv =
7466 container_of(work, struct ipw_priv, roam);
7467 mutex_lock(&priv->mutex);
7468 ipw_roam(priv);
7469 mutex_unlock(&priv->mutex);
7470 }
7471
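/*
 * Pick the best network from the scan results (or create an ad-hoc
 * network if so configured) and attempt to associate with it; if no
 * candidate is found, schedule another scan.
 */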
7472 static int ipw_associate(void *data)
7473 {
7474 struct ipw_priv *priv = data;
7475
7476 struct ieee80211_network *network = NULL;
7477 struct ipw_network_match match = {
7478 .network = NULL
7479 };
7480 struct ipw_supported_rates *rates;
7481 struct list_head *element;
7482 unsigned long flags;
7483
7484 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7485 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7486 return 0;
7487 }
7488
7489 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7490 IPW_DEBUG_ASSOC("Not attempting association (already in "
7491 "progress)\n");
7492 return 0;
7493 }
7494
7495 if (priv->status & STATUS_DISASSOCIATING) {
7496 IPW_DEBUG_ASSOC("Not attempting association (in "
7497 "disassociating)\n ");
7498 queue_work(priv->workqueue, &priv->associate);
7499 return 0;
7500 }
7501
7502 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7503 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7504 "initialized)\n");
7505 return 0;
7506 }
7507
7508 if (!(priv->config & CFG_ASSOCIATE) &&
7509 !(priv->config & (CFG_STATIC_ESSID |
7510 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7511 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7512 return 0;
7513 }
7514
7515 /* Protect our use of the network_list */
7516 spin_lock_irqsave(&priv->ieee->lock, flags);
7517 list_for_each_entry(network, &priv->ieee->network_list, list)
7518 ipw_best_network(priv, &match, network, 0);
7519
7520 network = match.network;
7521 rates = &match.rates;
7522
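	/* If no matching network was found but we are in ad-hoc mode with a
	 * static ESSID and channel configured (and ad-hoc creation is
	 * enabled), claim a free network entry and create our own IBSS. */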
7523 if (network == NULL &&
7524 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7525 priv->config & CFG_ADHOC_CREATE &&
7526 priv->config & CFG_STATIC_ESSID &&
7527 priv->config & CFG_STATIC_CHANNEL &&
7528 !list_empty(&priv->ieee->network_free_list)) {
7529 element = priv->ieee->network_free_list.next;
7530 network = list_entry(element, struct ieee80211_network, list);
7531 ipw_adhoc_create(priv, network);
7532 rates = &priv->rates;
7533 list_del(element);
7534 list_add_tail(&network->list, &priv->ieee->network_list);
7535 }
7536 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7537
7538 /* If we reached the end of the list, then we don't have any valid
7539 * matching APs */
7540 if (!network) {
7541 ipw_debug_config(priv);
7542
7543 if (!(priv->status & STATUS_SCANNING)) {
7544 if (!(priv->config & CFG_SPEED_SCAN))
7545 queue_delayed_work(priv->workqueue,
7546 &priv->request_scan,
7547 SCAN_INTERVAL);
7548 else
7549 queue_delayed_work(priv->workqueue,
7550 &priv->request_scan, 0);
7551 }
7552
7553 return 0;
7554 }
7555
7556 ipw_associate_network(priv, network, rates, 0);
7557
7558 return 1;
7559 }
7560
7561 static void ipw_bg_associate(struct work_struct *work)
7562 {
7563 struct ipw_priv *priv =
7564 container_of(work, struct ipw_priv, associate);
7565 mutex_lock(&priv->mutex);
7566 ipw_associate(priv);
7567 mutex_unlock(&priv->mutex);
7568 }
7569
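/* When the firmware decrypts a frame in hardware it leaves the Protected
 * bit and the security headers/trailers (CCMP header and MIC, or WEP IV
 * and ICV) in place.  Strip them here so the frame looks like plaintext
 * to the ieee80211 stack. */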
7570 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7571 struct sk_buff *skb)
7572 {
7573 struct ieee80211_hdr *hdr;
7574 u16 fc;
7575
7576 hdr = (struct ieee80211_hdr *)skb->data;
7577 fc = le16_to_cpu(hdr->frame_ctl);
7578 if (!(fc & IEEE80211_FCTL_PROTECTED))
7579 return;
7580
7581 fc &= ~IEEE80211_FCTL_PROTECTED;
7582 hdr->frame_ctl = cpu_to_le16(fc);
7583 switch (priv->ieee->sec.level) {
7584 case SEC_LEVEL_3:
7585 /* Remove CCMP HDR */
7586 memmove(skb->data + IEEE80211_3ADDR_LEN,
7587 skb->data + IEEE80211_3ADDR_LEN + 8,
7588 skb->len - IEEE80211_3ADDR_LEN - 8);
7589 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7590 break;
7591 case SEC_LEVEL_2:
7592 break;
7593 case SEC_LEVEL_1:
7594 /* Remove IV */
7595 memmove(skb->data + IEEE80211_3ADDR_LEN,
7596 skb->data + IEEE80211_3ADDR_LEN + 4,
7597 skb->len - IEEE80211_3ADDR_LEN - 4);
7598 skb_trim(skb, skb->len - 8); /* IV + ICV */
7599 break;
7600 case SEC_LEVEL_0:
7601 break;
7602 default:
7603 printk(KERN_ERR "Unknown security level %d\n",
7604 priv->ieee->sec.level);
7605 break;
7606 }
7607 }
7608
7609 static void ipw_handle_data_packet(struct ipw_priv *priv,
7610 struct ipw_rx_mem_buffer *rxb,
7611 struct ieee80211_rx_stats *stats)
7612 {
7613 struct ieee80211_hdr_4addr *hdr;
7614 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7615
7616 /* We received data from the HW, so stop the watchdog */
7617 priv->net_dev->trans_start = jiffies;
7618
7619 /* We only process data packets if the
7620 * interface is open */
7621 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7622 skb_tailroom(rxb->skb))) {
7623 priv->ieee->stats.rx_errors++;
7624 priv->wstats.discard.misc++;
7625 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7626 return;
7627 } else if (unlikely(!netif_running(priv->net_dev))) {
7628 priv->ieee->stats.rx_dropped++;
7629 priv->wstats.discard.misc++;
7630 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7631 return;
7632 }
7633
7634 /* Advance skb->data to the start of the actual payload */
7635 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7636
7637 /* Set the size of the skb to the size of the frame */
7638 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7639
7640 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7641
7642 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7643 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7644 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7645 (is_multicast_ether_addr(hdr->addr1) ?
7646 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7647 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7648
7649 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7650 priv->ieee->stats.rx_errors++;
7651 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7652 rxb->skb = NULL;
7653 __ipw_led_activity_on(priv);
7654 }
7655 }
7656
7657 #ifdef CONFIG_IPW2200_RADIOTAP
7658 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7659 struct ipw_rx_mem_buffer *rxb,
7660 struct ieee80211_rx_stats *stats)
7661 {
7662 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7663 struct ipw_rx_frame *frame = &pkt->u.frame;
7664
7665 /* initial pull of some data */
7666 u16 received_channel = frame->received_channel;
7667 u8 antennaAndPhy = frame->antennaAndPhy;
7668 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7669 u16 pktrate = frame->rate;
7670
7671 /* Magic struct that slots into the radiotap header -- no reason
7672 * to build this manually element by element, we can write it much
7673 * more efficiently than we can parse it. ORDER MATTERS HERE */
7674 struct ipw_rt_hdr *ipw_rt;
7675
7676 short len = le16_to_cpu(pkt->u.frame.length);
7677
7678 /* We received data from the HW, so stop the watchdog */
7679 priv->net_dev->trans_start = jiffies;
7680
7681 /* We only process data packets if the
7682 * interface is open */
7683 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7684 skb_tailroom(rxb->skb))) {
7685 priv->ieee->stats.rx_errors++;
7686 priv->wstats.discard.misc++;
7687 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7688 return;
7689 } else if (unlikely(!netif_running(priv->net_dev))) {
7690 priv->ieee->stats.rx_dropped++;
7691 priv->wstats.discard.misc++;
7692 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7693 return;
7694 }
7695
7696 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7697 * that now */
7698 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7699 /* FIXME: Should alloc bigger skb instead */
7700 priv->ieee->stats.rx_dropped++;
7701 priv->wstats.discard.misc++;
7702 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7703 return;
7704 }
7705
7706 /* copy the frame itself */
7707 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7708 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7709
7710 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7711 * part of our real header, saves a little time.
7712 *
7713 * No longer necessary since we fill in all our data. Purge before merging
7714 * patch officially.
7715 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7716 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7717 */
7718
7719 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7720
7721 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7722 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7723 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7724
7725 /* Big bitfield of all the fields we provide in radiotap */
7726 ipw_rt->rt_hdr.it_present =
7727 ((1 << IEEE80211_RADIOTAP_TSFT) |
7728 (1 << IEEE80211_RADIOTAP_FLAGS) |
7729 (1 << IEEE80211_RADIOTAP_RATE) |
7730 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7731 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7732 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7733 (1 << IEEE80211_RADIOTAP_ANTENNA));
7734
7735 /* Zero the flags, we'll add to them as we go */
7736 ipw_rt->rt_flags = 0;
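	/* Assemble the parent TSF from its four little-endian bytes */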
7737 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7738 frame->parent_tsf[2] << 16 |
7739 frame->parent_tsf[1] << 8 |
7740 frame->parent_tsf[0]);
7741
7742 /* Convert signal to DBM */
7743 ipw_rt->rt_dbmsignal = antsignal;
7744 ipw_rt->rt_dbmnoise = frame->noise;
7745
7746 /* Convert the channel data and set the flags */
7747 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7748 if (received_channel > 14) { /* 802.11a */
7749 ipw_rt->rt_chbitmask =
7750 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7751 } else if (antennaAndPhy & 32) { /* 802.11b */
7752 ipw_rt->rt_chbitmask =
7753 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7754 } else { /* 802.11g */
7755 ipw_rt->rt_chbitmask =
7756 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7757 }
7758
7759 /* set the rate in multiples of 500k/s */
7760 switch (pktrate) {
7761 case IPW_TX_RATE_1MB:
7762 ipw_rt->rt_rate = 2;
7763 break;
7764 case IPW_TX_RATE_2MB:
7765 ipw_rt->rt_rate = 4;
7766 break;
7767 case IPW_TX_RATE_5MB:
7768 ipw_rt->rt_rate = 10;
7769 break;
7770 case IPW_TX_RATE_6MB:
7771 ipw_rt->rt_rate = 12;
7772 break;
7773 case IPW_TX_RATE_9MB:
7774 ipw_rt->rt_rate = 18;
7775 break;
7776 case IPW_TX_RATE_11MB:
7777 ipw_rt->rt_rate = 22;
7778 break;
7779 case IPW_TX_RATE_12MB:
7780 ipw_rt->rt_rate = 24;
7781 break;
7782 case IPW_TX_RATE_18MB:
7783 ipw_rt->rt_rate = 36;
7784 break;
7785 case IPW_TX_RATE_24MB:
7786 ipw_rt->rt_rate = 48;
7787 break;
7788 case IPW_TX_RATE_36MB:
7789 ipw_rt->rt_rate = 72;
7790 break;
7791 case IPW_TX_RATE_48MB:
7792 ipw_rt->rt_rate = 96;
7793 break;
7794 case IPW_TX_RATE_54MB:
7795 ipw_rt->rt_rate = 108;
7796 break;
7797 default:
7798 ipw_rt->rt_rate = 0;
7799 break;
7800 }
7801
7802 /* antenna number */
7803 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7804
7805 /* set the preamble flag if we have it */
7806 if ((antennaAndPhy & 64))
7807 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7808
7809 /* Set the size of the skb to the size of the frame */
7810 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7811
7812 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7813
7814 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7815 priv->ieee->stats.rx_errors++;
7816 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7817 rxb->skb = NULL;
7818 /* no LED during capture */
7819 }
7820 }
7821 #endif
7822
7823 #ifdef CONFIG_IPW2200_PROMISCUOUS
7824 #define ieee80211_is_probe_response(fc) \
7825 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7826 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7827
7828 #define ieee80211_is_management(fc) \
7829 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7830
7831 #define ieee80211_is_control(fc) \
7832 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7833
7834 #define ieee80211_is_data(fc) \
7835 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7836
7837 #define ieee80211_is_assoc_request(fc) \
7838 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7839
7840 #define ieee80211_is_reassoc_request(fc) \
7841 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7842
7843 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7844 struct ipw_rx_mem_buffer *rxb,
7845 struct ieee80211_rx_stats *stats)
7846 {
7847 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7848 struct ipw_rx_frame *frame = &pkt->u.frame;
7849 struct ipw_rt_hdr *ipw_rt;
7850
7851 /* First cache any information we need before we overwrite
7852 * the information provided in the skb from the hardware */
7853 struct ieee80211_hdr *hdr;
7854 u16 channel = frame->received_channel;
7855 u8 phy_flags = frame->antennaAndPhy;
7856 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7857 s8 noise = frame->noise;
7858 u8 rate = frame->rate;
7859 short len = le16_to_cpu(pkt->u.frame.length);
7860 struct sk_buff *skb;
7861 int hdr_only = 0;
7862 u16 filter = priv->prom_priv->filter;
7863
7864 /* If the filter is set to not include Rx frames then return */
7865 if (filter & IPW_PROM_NO_RX)
7866 return;
7867
7868 /* We received data from the HW, so stop the watchdog */
7869 priv->prom_net_dev->trans_start = jiffies;
7870
7871 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7872 priv->prom_priv->ieee->stats.rx_errors++;
7873 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7874 return;
7875 }
7876
7877 /* We only process data packets if the interface is open */
7878 if (unlikely(!netif_running(priv->prom_net_dev))) {
7879 priv->prom_priv->ieee->stats.rx_dropped++;
7880 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7881 return;
7882 }
7883
7884 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7885 * that now */
7886 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7887 /* FIXME: Should alloc bigger skb instead */
7888 priv->prom_priv->ieee->stats.rx_dropped++;
7889 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7890 return;
7891 }
7892
7893 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7894 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7895 if (filter & IPW_PROM_NO_MGMT)
7896 return;
7897 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7898 hdr_only = 1;
7899 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7900 if (filter & IPW_PROM_NO_CTL)
7901 return;
7902 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7903 hdr_only = 1;
7904 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7905 if (filter & IPW_PROM_NO_DATA)
7906 return;
7907 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7908 hdr_only = 1;
7909 }
7910
7911 /* Copy the SKB since this is for the promiscuous side */
7912 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7913 if (skb == NULL) {
7914 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
7915 return;
7916 }
7917
7918 /* copy the frame data to write after where the radiotap header goes */
7919 ipw_rt = (void *)skb->data;
7920
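	/* When a header-only filter matched above, copy just the 802.11
	 * header instead of the full frame. */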
7921 if (hdr_only)
7922 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7923
7924 memcpy(ipw_rt->payload, hdr, len);
7925
7926 /* Zero the radiotap static buffer ... We only need to zero the bytes
7927 * NOT part of our real header, saves a little time.
7928 *
7929 * No longer necessary since we fill in all our data. Purge before
7930 * merging patch officially.
7931 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7932 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7933 */
7934
7935 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7936 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7937 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7938
7939 /* Set the size of the skb to the size of the frame */
7940 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7941
7942 /* Big bitfield of all the fields we provide in radiotap */
7943 ipw_rt->rt_hdr.it_present =
7944 ((1 << IEEE80211_RADIOTAP_TSFT) |
7945 (1 << IEEE80211_RADIOTAP_FLAGS) |
7946 (1 << IEEE80211_RADIOTAP_RATE) |
7947 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7948 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7949 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7950 (1 << IEEE80211_RADIOTAP_ANTENNA));
7951
7952 /* Zero the flags, we'll add to them as we go */
7953 ipw_rt->rt_flags = 0;
7954 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7955 frame->parent_tsf[2] << 16 |
7956 frame->parent_tsf[1] << 8 |
7957 frame->parent_tsf[0]);
7958
7959 /* Convert to DBM */
7960 ipw_rt->rt_dbmsignal = signal;
7961 ipw_rt->rt_dbmnoise = noise;
7962
7963 /* Convert the channel data and set the flags */
7964 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7965 if (channel > 14) { /* 802.11a */
7966 ipw_rt->rt_chbitmask =
7967 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7968 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7969 ipw_rt->rt_chbitmask =
7970 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7971 } else { /* 802.11g */
7972 ipw_rt->rt_chbitmask =
7973 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7974 }
7975
7976 /* set the rate in multiples of 500k/s */
7977 switch (rate) {
7978 case IPW_TX_RATE_1MB:
7979 ipw_rt->rt_rate = 2;
7980 break;
7981 case IPW_TX_RATE_2MB:
7982 ipw_rt->rt_rate = 4;
7983 break;
7984 case IPW_TX_RATE_5MB:
7985 ipw_rt->rt_rate = 10;
7986 break;
7987 case IPW_TX_RATE_6MB:
7988 ipw_rt->rt_rate = 12;
7989 break;
7990 case IPW_TX_RATE_9MB:
7991 ipw_rt->rt_rate = 18;
7992 break;
7993 case IPW_TX_RATE_11MB:
7994 ipw_rt->rt_rate = 22;
7995 break;
7996 case IPW_TX_RATE_12MB:
7997 ipw_rt->rt_rate = 24;
7998 break;
7999 case IPW_TX_RATE_18MB:
8000 ipw_rt->rt_rate = 36;
8001 break;
8002 case IPW_TX_RATE_24MB:
8003 ipw_rt->rt_rate = 48;
8004 break;
8005 case IPW_TX_RATE_36MB:
8006 ipw_rt->rt_rate = 72;
8007 break;
8008 case IPW_TX_RATE_48MB:
8009 ipw_rt->rt_rate = 96;
8010 break;
8011 case IPW_TX_RATE_54MB:
8012 ipw_rt->rt_rate = 108;
8013 break;
8014 default:
8015 ipw_rt->rt_rate = 0;
8016 break;
8017 }
8018
8019 /* antenna number */
8020 ipw_rt->rt_antenna = (phy_flags & 3);
8021
8022 /* set the preamble flag if we have it */
8023 if (phy_flags & (1 << 6))
8024 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8025
8026 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8027
8028 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8029 priv->prom_priv->ieee->stats.rx_errors++;
8030 dev_kfree_skb_any(skb);
8031 }
8032 }
8033 #endif
8034
8035 static int is_network_packet(struct ipw_priv *priv,
8036 struct ieee80211_hdr_4addr *header)
8037 {
8038 /* Filter incoming packets to determine if they are targeted toward
8039 * this network, discarding packets coming from ourselves */
8040 switch (priv->ieee->iw_mode) {
8041 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8042 /* packets from our adapter are dropped (echo) */
8043 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8044 return 0;
8045
8046 /* {broad,multi}cast packets to our BSSID go through */
8047 if (is_multicast_ether_addr(header->addr1))
8048 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8049
8050 /* packets to our adapter go through */
8051 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8052 ETH_ALEN);
8053
8054 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8055 /* packets from our adapter are dropped (echo) */
8056 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8057 return 0;
8058
8059 /* {broad,multi}cast packets to our BSS go through */
8060 if (is_multicast_ether_addr(header->addr1))
8061 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8062
8063 /* packets to our adapter go through */
8064 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8065 ETH_ALEN);
8066 }
8067
8068 return 1;
8069 }
8070
8071 #define IPW_PACKET_RETRY_TIME HZ
8072
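/* Detect retransmissions by comparing the sequence and fragment numbers of
 * an incoming frame with the last frame seen from the same sender: a
 * per-MAC hash table is used in IBSS mode and a single per-BSS record in
 * infrastructure mode.  A frame repeating the last sequence number within
 * IPW_PACKET_RETRY_TIME, with a duplicate or out-of-order fragment number,
 * is reported as a duplicate. */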
8073 static int is_duplicate_packet(struct ipw_priv *priv,
8074 struct ieee80211_hdr_4addr *header)
8075 {
8076 u16 sc = le16_to_cpu(header->seq_ctl);
8077 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8078 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8079 u16 *last_seq, *last_frag;
8080 unsigned long *last_time;
8081
8082 switch (priv->ieee->iw_mode) {
8083 case IW_MODE_ADHOC:
8084 {
8085 struct list_head *p;
8086 struct ipw_ibss_seq *entry = NULL;
8087 u8 *mac = header->addr2;
8088 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8089
8090 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8091 entry =
8092 list_entry(p, struct ipw_ibss_seq, list);
8093 if (!memcmp(entry->mac, mac, ETH_ALEN))
8094 break;
8095 }
8096 if (p == &priv->ibss_mac_hash[index]) {
8097 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8098 if (!entry) {
8099 IPW_ERROR
8100 ("Cannot malloc new mac entry\n");
8101 return 0;
8102 }
8103 memcpy(entry->mac, mac, ETH_ALEN);
8104 entry->seq_num = seq;
8105 entry->frag_num = frag;
8106 entry->packet_time = jiffies;
8107 list_add(&entry->list,
8108 &priv->ibss_mac_hash[index]);
8109 return 0;
8110 }
8111 last_seq = &entry->seq_num;
8112 last_frag = &entry->frag_num;
8113 last_time = &entry->packet_time;
8114 break;
8115 }
8116 case IW_MODE_INFRA:
8117 last_seq = &priv->last_seq_num;
8118 last_frag = &priv->last_frag_num;
8119 last_time = &priv->last_packet_time;
8120 break;
8121 default:
8122 return 0;
8123 }
8124 if ((*last_seq == seq) &&
8125 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8126 if (*last_frag == frag)
8127 goto drop;
8128 if (*last_frag + 1 != frag)
8129 /* out-of-order fragment */
8130 goto drop;
8131 } else
8132 *last_seq = seq;
8133
8134 *last_frag = frag;
8135 *last_time = jiffies;
8136 return 0;
8137
8138 drop:
8139 /* The BUG_ON below is commented out because the card has been
8140 * observed to receive duplicate packets without the FCTL_RETRY bit
8141 * set when operating in IBSS mode with fragmentation enabled.
8142 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8143 return 1;
8144 }
8145
8146 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8147 struct ipw_rx_mem_buffer *rxb,
8148 struct ieee80211_rx_stats *stats)
8149 {
8150 struct sk_buff *skb = rxb->skb;
8151 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8152 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8153 (skb->data + IPW_RX_FRAME_SIZE);
8154
8155 ieee80211_rx_mgt(priv->ieee, header, stats);
8156
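	/* In IBSS mode, learn new peers from beacons and probe responses
	 * that carry our BSSID. */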
8157 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8158 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8159 IEEE80211_STYPE_PROBE_RESP) ||
8160 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8161 IEEE80211_STYPE_BEACON))) {
8162 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8163 ipw_add_station(priv, header->addr2);
8164 }
8165
8166 if (priv->config & CFG_NET_STATS) {
8167 IPW_DEBUG_HC("sending stat packet\n");
8168
8169 /* Set the size of the skb to the size of the full
8170 * ipw header and 802.11 frame */
8171 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8172 IPW_RX_FRAME_SIZE);
8173
8174 /* Advance past the ipw packet header to the 802.11 frame */
8175 skb_pull(skb, IPW_RX_FRAME_SIZE);
8176
8177 /* Push the ieee80211_rx_stats before the 802.11 frame */
8178 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8179
8180 skb->dev = priv->ieee->dev;
8181
8182 /* Point raw at the ieee80211_stats */
8183 skb_reset_mac_header(skb);
8184
8185 skb->pkt_type = PACKET_OTHERHOST;
8186 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8187 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8188 netif_rx(skb);
8189 rxb->skb = NULL;
8190 }
8191 }
8192
8193 /*
8194 * Main entry function for receiving a packet with 802.11 headers. This
8195 * should be called whenever the FW has notified us that there is a new
8196 * skb in the receive queue.
8197 */
8198 static void ipw_rx(struct ipw_priv *priv)
8199 {
8200 struct ipw_rx_mem_buffer *rxb;
8201 struct ipw_rx_packet *pkt;
8202 struct ieee80211_hdr_4addr *header;
8203 u32 r, w, i;
8204 u8 network_packet;
8205
8206 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8207 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8208 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8209
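	/* Walk the Rx ring from the entry after the last one we processed
	 * up to (but not including) the hardware read index. */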
8210 while (i != r) {
8211 rxb = priv->rxq->queue[i];
8212 if (unlikely(rxb == NULL)) {
8213 printk(KERN_CRIT "Queue not allocated!\n");
8214 break;
8215 }
8216 priv->rxq->queue[i] = NULL;
8217
8218 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8219 IPW_RX_BUF_SIZE,
8220 PCI_DMA_FROMDEVICE);
8221
8222 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8223 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8224 pkt->header.message_type,
8225 pkt->header.rx_seq_num, pkt->header.control_bits);
8226
8227 switch (pkt->header.message_type) {
8228 case RX_FRAME_TYPE: /* 802.11 frame */ {
8229 struct ieee80211_rx_stats stats = {
8230 .rssi = pkt->u.frame.rssi_dbm -
8231 IPW_RSSI_TO_DBM,
8232 .signal =
8233 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8234 IPW_RSSI_TO_DBM + 0x100,
8235 .noise =
8236 le16_to_cpu(pkt->u.frame.noise),
8237 .rate = pkt->u.frame.rate,
8238 .mac_time = jiffies,
8239 .received_channel =
8240 pkt->u.frame.received_channel,
8241 .freq =
8242 (pkt->u.frame.
8243 control & (1 << 0)) ?
8244 IEEE80211_24GHZ_BAND :
8245 IEEE80211_52GHZ_BAND,
8246 .len = le16_to_cpu(pkt->u.frame.length),
8247 };
8248
8249 if (stats.rssi != 0)
8250 stats.mask |= IEEE80211_STATMASK_RSSI;
8251 if (stats.signal != 0)
8252 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8253 if (stats.noise != 0)
8254 stats.mask |= IEEE80211_STATMASK_NOISE;
8255 if (stats.rate != 0)
8256 stats.mask |= IEEE80211_STATMASK_RATE;
8257
8258 priv->rx_packets++;
8259
8260 #ifdef CONFIG_IPW2200_PROMISCUOUS
8261 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8262 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8263 #endif
8264
8265 #ifdef CONFIG_IPW2200_MONITOR
8266 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8267 #ifdef CONFIG_IPW2200_RADIOTAP
8268
8269 ipw_handle_data_packet_monitor(priv,
8270 rxb,
8271 &stats);
8272 #else
8273 ipw_handle_data_packet(priv, rxb,
8274 &stats);
8275 #endif
8276 break;
8277 }
8278 #endif
8279
8280 header =
8281 (struct ieee80211_hdr_4addr *)(rxb->skb->
8282 data +
8283 IPW_RX_FRAME_SIZE);
8284 /* TODO: Check Ad-Hoc dest/source and make sure
8285 * that we are actually parsing these packets
8286 * correctly -- we should probably use the
8287 * frame control of the packet and disregard
8288 * the current iw_mode */
8289
8290 network_packet =
8291 is_network_packet(priv, header);
8292 if (network_packet && priv->assoc_network) {
8293 priv->assoc_network->stats.rssi =
8294 stats.rssi;
8295 priv->exp_avg_rssi =
8296 exponential_average(priv->exp_avg_rssi,
8297 stats.rssi, DEPTH_RSSI);
8298 }
8299
8300 IPW_DEBUG_RX("Frame: len=%u\n",
8301 le16_to_cpu(pkt->u.frame.length));
8302
8303 if (le16_to_cpu(pkt->u.frame.length) <
8304 ieee80211_get_hdrlen(le16_to_cpu(
8305 header->frame_ctl))) {
8306 IPW_DEBUG_DROP
8307 ("Received packet is too small. "
8308 "Dropping.\n");
8309 priv->ieee->stats.rx_errors++;
8310 priv->wstats.discard.misc++;
8311 break;
8312 }
8313
8314 switch (WLAN_FC_GET_TYPE
8315 (le16_to_cpu(header->frame_ctl))) {
8316
8317 case IEEE80211_FTYPE_MGMT:
8318 ipw_handle_mgmt_packet(priv, rxb,
8319 &stats);
8320 break;
8321
8322 case IEEE80211_FTYPE_CTL:
8323 break;
8324
8325 case IEEE80211_FTYPE_DATA:
8326 if (unlikely(!network_packet ||
8327 is_duplicate_packet(priv,
8328 header)))
8329 {
8330 IPW_DEBUG_DROP("Dropping: "
8331 MAC_FMT ", "
8332 MAC_FMT ", "
8333 MAC_FMT "\n",
8334 MAC_ARG(header->
8335 addr1),
8336 MAC_ARG(header->
8337 addr2),
8338 MAC_ARG(header->
8339 addr3));
8340 break;
8341 }
8342
8343 ipw_handle_data_packet(priv, rxb,
8344 &stats);
8345
8346 break;
8347 }
8348 break;
8349 }
8350
8351 case RX_HOST_NOTIFICATION_TYPE:{
8352 IPW_DEBUG_RX
8353 ("Notification: subtype=%02X flags=%02X size=%d\n",
8354 pkt->u.notification.subtype,
8355 pkt->u.notification.flags,
8356 le16_to_cpu(pkt->u.notification.size));
8357 ipw_rx_notification(priv, &pkt->u.notification);
8358 break;
8359 }
8360
8361 default:
8362 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8363 pkt->header.message_type);
8364 break;
8365 }
8366
8367 /* For now we just don't re-use anything. We can tweak this
8368 * later to try and re-use notification packets and SKBs that
8369 * fail to Rx correctly */
8370 if (rxb->skb != NULL) {
8371 dev_kfree_skb_any(rxb->skb);
8372 rxb->skb = NULL;
8373 }
8374
8375 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8376 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8377 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8378
8379 i = (i + 1) % RX_QUEUE_SIZE;
8380 }
8381
8382 /* Backtrack one entry */
8383 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8384
8385 ipw_rx_queue_restock(priv);
8386 }
8387
8388 #define DEFAULT_RTS_THRESHOLD 2304U
8389 #define MIN_RTS_THRESHOLD 1U
8390 #define MAX_RTS_THRESHOLD 2304U
8391 #define DEFAULT_BEACON_INTERVAL 100U
8392 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8393 #define DEFAULT_LONG_RETRY_LIMIT 4U
8394
8395 /**
8396 * ipw_sw_reset
8397 * @option: options to control different reset behaviour
8398 * 0 = reset everything except the 'disable' module_param
8399 * 1 = reset everything and print out driver info (for probe only)
8400 * 2 = reset everything
8401 */
8402 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8403 {
8404 int band, modulation;
8405 int old_mode = priv->ieee->iw_mode;
8406
8407 /* Initialize module parameter values here */
8408 priv->config = 0;
8409
8410 /* We default to disabling the LED code as right now it causes
8411 * too many systems to lock up... */
8412 if (!led)
8413 priv->config |= CFG_NO_LED;
8414
8415 if (associate)
8416 priv->config |= CFG_ASSOCIATE;
8417 else
8418 IPW_DEBUG_INFO("Auto associate disabled.\n");
8419
8420 if (auto_create)
8421 priv->config |= CFG_ADHOC_CREATE;
8422 else
8423 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8424
8425 priv->config &= ~CFG_STATIC_ESSID;
8426 priv->essid_len = 0;
8427 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8428
8429 if (disable && option) {
8430 priv->status |= STATUS_RF_KILL_SW;
8431 IPW_DEBUG_INFO("Radio disabled.\n");
8432 }
8433
8434 if (channel != 0) {
8435 priv->config |= CFG_STATIC_CHANNEL;
8436 priv->channel = channel;
8437 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8438 /* TODO: Validate that provided channel is in range */
8439 }
8440 #ifdef CONFIG_IPW2200_QOS
8441 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8442 burst_duration_CCK, burst_duration_OFDM);
8443 #endif /* CONFIG_IPW2200_QOS */
8444
8445 switch (mode) {
8446 case 1:
8447 priv->ieee->iw_mode = IW_MODE_ADHOC;
8448 priv->net_dev->type = ARPHRD_ETHER;
8449
8450 break;
8451 #ifdef CONFIG_IPW2200_MONITOR
8452 case 2:
8453 priv->ieee->iw_mode = IW_MODE_MONITOR;
8454 #ifdef CONFIG_IPW2200_RADIOTAP
8455 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8456 #else
8457 priv->net_dev->type = ARPHRD_IEEE80211;
8458 #endif
8459 break;
8460 #endif
8461 default:
8462 case 0:
8463 priv->net_dev->type = ARPHRD_ETHER;
8464 priv->ieee->iw_mode = IW_MODE_INFRA;
8465 break;
8466 }
8467
8468 if (hwcrypto) {
8469 priv->ieee->host_encrypt = 0;
8470 priv->ieee->host_encrypt_msdu = 0;
8471 priv->ieee->host_decrypt = 0;
8472 priv->ieee->host_mc_decrypt = 0;
8473 }
8474 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8475
8476 /* The IPW2200/2915 is able to do hardware fragmentation. */
8477 priv->ieee->host_open_frag = 0;
8478
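	/* PCI device IDs 0x4223 and 0x4224 identify the dual-band 2915ABG;
	 * everything else handled by this driver is a 2200BG. */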
8479 if ((priv->pci_dev->device == 0x4223) ||
8480 (priv->pci_dev->device == 0x4224)) {
8481 if (option == 1)
8482 printk(KERN_INFO DRV_NAME
8483 ": Detected Intel PRO/Wireless 2915ABG Network "
8484 "Connection\n");
8485 priv->ieee->abg_true = 1;
8486 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8487 modulation = IEEE80211_OFDM_MODULATION |
8488 IEEE80211_CCK_MODULATION;
8489 priv->adapter = IPW_2915ABG;
8490 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8491 } else {
8492 if (option == 1)
8493 printk(KERN_INFO DRV_NAME
8494 ": Detected Intel PRO/Wireless 2200BG Network "
8495 "Connection\n");
8496
8497 priv->ieee->abg_true = 0;
8498 band = IEEE80211_24GHZ_BAND;
8499 modulation = IEEE80211_OFDM_MODULATION |
8500 IEEE80211_CCK_MODULATION;
8501 priv->adapter = IPW_2200BG;
8502 priv->ieee->mode = IEEE_G | IEEE_B;
8503 }
8504
8505 priv->ieee->freq_band = band;
8506 priv->ieee->modulation = modulation;
8507
8508 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8509
8510 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8511 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8512
8513 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8514 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8515 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8516
8517 /* If power management is turned on, default to AC mode */
8518 priv->power_mode = IPW_POWER_AC;
8519 priv->tx_power = IPW_TX_POWER_DEFAULT;
8520
8521 return old_mode == priv->ieee->iw_mode;
8522 }
8523
8524 /*
8525 * This file defines the Wireless Extension handlers. It does not
8526 * define any methods of hardware manipulation and relies on the
8527 * functions defined in ipw_main to provide the HW interaction.
8528 *
8529 * The exception to this is the use of the ipw_get_ordinal()
8530 * function used to poll the hardware vs. making unnecessary calls.
8531 *
8532 */
8533
8534 static int ipw_wx_get_name(struct net_device *dev,
8535 struct iw_request_info *info,
8536 union iwreq_data *wrqu, char *extra)
8537 {
8538 struct ipw_priv *priv = ieee80211_priv(dev);
8539 mutex_lock(&priv->mutex);
8540 if (priv->status & STATUS_RF_KILL_MASK)
8541 strcpy(wrqu->name, "radio off");
8542 else if (!(priv->status & STATUS_ASSOCIATED))
8543 strcpy(wrqu->name, "unassociated");
8544 else
8545 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8546 ipw_modes[priv->assoc_request.ieee_mode]);
8547 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8548 mutex_unlock(&priv->mutex);
8549 return 0;
8550 }
8551
8552 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8553 {
8554 if (channel == 0) {
8555 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8556 priv->config &= ~CFG_STATIC_CHANNEL;
8557 IPW_DEBUG_ASSOC("Attempting to associate with new "
8558 "parameters.\n");
8559 ipw_associate(priv);
8560 return 0;
8561 }
8562
8563 priv->config |= CFG_STATIC_CHANNEL;
8564
8565 if (priv->channel == channel) {
8566 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8567 channel);
8568 return 0;
8569 }
8570
8571 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8572 priv->channel = channel;
8573
8574 #ifdef CONFIG_IPW2200_MONITOR
8575 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8576 int i;
8577 if (priv->status & STATUS_SCANNING) {
8578 IPW_DEBUG_SCAN("Scan abort triggered due to "
8579 "channel change.\n");
8580 ipw_abort_scan(priv);
8581 }
8582
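		/* Busy-wait for the scan abort to complete
		 * (at most 1000 iterations x 10 us = ~10 ms). */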
8583 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8584 udelay(10);
8585
8586 if (priv->status & STATUS_SCANNING)
8587 IPW_DEBUG_SCAN("Still scanning...\n");
8588 else
8589 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8590 1000 - i);
8591
8592 return 0;
8593 }
8594 #endif /* CONFIG_IPW2200_MONITOR */
8595
8596 /* Network configuration changed -- force [re]association */
8597 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8598 if (!ipw_disassociate(priv))
8599 ipw_associate(priv);
8600
8601 return 0;
8602 }
8603
8604 static int ipw_wx_set_freq(struct net_device *dev,
8605 struct iw_request_info *info,
8606 union iwreq_data *wrqu, char *extra)
8607 {
8608 struct ipw_priv *priv = ieee80211_priv(dev);
8609 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8610 struct iw_freq *fwrq = &wrqu->freq;
8611 int ret = 0, i;
8612 u8 channel, flags;
8613 int band;
8614
8615 if (fwrq->m == 0) {
8616 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8617 mutex_lock(&priv->mutex);
8618 ret = ipw_set_channel(priv, 0);
8619 mutex_unlock(&priv->mutex);
8620 return ret;
8621 }
8622 /* if setting by freq convert to channel */
8623 if (fwrq->e == 1) {
8624 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8625 if (channel == 0)
8626 return -EINVAL;
8627 } else
8628 channel = fwrq->m;
8629
8630 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8631 return -EINVAL;
8632
8633 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8634 i = ieee80211_channel_to_index(priv->ieee, channel);
8635 if (i == -1)
8636 return -EINVAL;
8637
8638 flags = (band == IEEE80211_24GHZ_BAND) ?
8639 geo->bg[i].flags : geo->a[i].flags;
8640 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8641 IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive scan only)\n");
8642 return -EINVAL;
8643 }
8644 }
8645
8646 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8647 mutex_lock(&priv->mutex);
8648 ret = ipw_set_channel(priv, channel);
8649 mutex_unlock(&priv->mutex);
8650 return ret;
8651 }
8652
8653 static int ipw_wx_get_freq(struct net_device *dev,
8654 struct iw_request_info *info,
8655 union iwreq_data *wrqu, char *extra)
8656 {
8657 struct ipw_priv *priv = ieee80211_priv(dev);
8658
8659 wrqu->freq.e = 0;
8660
8661 /* If we are associated, trying to associate, or have a statically
8662 * configured CHANNEL then return that; otherwise return ANY */
8663 mutex_lock(&priv->mutex);
8664 if (priv->config & CFG_STATIC_CHANNEL ||
8665 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8666 int i;
8667
8668 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8669 BUG_ON(i == -1);
8670 wrqu->freq.e = 1;
8671
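		/* The geo tables store frequencies in MHz; Wireless
		 * Extensions expect m * 10^e Hz, so scale by 100000 and
		 * report e = 1. */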
8672 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8673 case IEEE80211_52GHZ_BAND:
8674 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8675 break;
8676
8677 case IEEE80211_24GHZ_BAND:
8678 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8679 break;
8680
8681 default:
8682 BUG();
8683 }
8684 } else
8685 wrqu->freq.m = 0;
8686
8687 mutex_unlock(&priv->mutex);
8688 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8689 return 0;
8690 }
8691
8692 static int ipw_wx_set_mode(struct net_device *dev,
8693 struct iw_request_info *info,
8694 union iwreq_data *wrqu, char *extra)
8695 {
8696 struct ipw_priv *priv = ieee80211_priv(dev);
8697 int err = 0;
8698
8699 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8700
8701 switch (wrqu->mode) {
8702 #ifdef CONFIG_IPW2200_MONITOR
8703 case IW_MODE_MONITOR:
8704 #endif
8705 case IW_MODE_ADHOC:
8706 case IW_MODE_INFRA:
8707 break;
8708 case IW_MODE_AUTO:
8709 wrqu->mode = IW_MODE_INFRA;
8710 break;
8711 default:
8712 return -EINVAL;
8713 }
8714 if (wrqu->mode == priv->ieee->iw_mode)
8715 return 0;
8716
8717 mutex_lock(&priv->mutex);
8718
8719 ipw_sw_reset(priv, 0);
8720
8721 #ifdef CONFIG_IPW2200_MONITOR
8722 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8723 priv->net_dev->type = ARPHRD_ETHER;
8724
8725 if (wrqu->mode == IW_MODE_MONITOR)
8726 #ifdef CONFIG_IPW2200_RADIOTAP
8727 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8728 #else
8729 priv->net_dev->type = ARPHRD_IEEE80211;
8730 #endif
8731 #endif /* CONFIG_IPW2200_MONITOR */
8732
8733 /* Free the existing firmware and reset the fw_loaded
8734 * flag so ipw_load() will bring in the new firmware */
8735 free_firmware();
8736
8737 priv->ieee->iw_mode = wrqu->mode;
8738
8739 queue_work(priv->workqueue, &priv->adapter_restart);
8740 mutex_unlock(&priv->mutex);
8741 return err;
8742 }
8743
8744 static int ipw_wx_get_mode(struct net_device *dev,
8745 struct iw_request_info *info,
8746 union iwreq_data *wrqu, char *extra)
8747 {
8748 struct ipw_priv *priv = ieee80211_priv(dev);
8749 mutex_lock(&priv->mutex);
8750 wrqu->mode = priv->ieee->iw_mode;
8751 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8752 mutex_unlock(&priv->mutex);
8753 return 0;
8754 }
8755
8756 /* Values are in microseconds */
8757 static const s32 timeout_duration[] = {
8758 350000,
8759 250000,
8760 75000,
8761 37000,
8762 25000,
8763 };
8764
8765 static const s32 period_duration[] = {
8766 400000,
8767 700000,
8768 1000000,
8769 1000000,
8770 1000000
8771 };
8772
8773 static int ipw_wx_get_range(struct net_device *dev,
8774 struct iw_request_info *info,
8775 union iwreq_data *wrqu, char *extra)
8776 {
8777 struct ipw_priv *priv = ieee80211_priv(dev);
8778 struct iw_range *range = (struct iw_range *)extra;
8779 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8780 int i = 0, j;
8781
8782 wrqu->data.length = sizeof(*range);
8783 memset(range, 0, sizeof(*range));
8784
8785 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8786 range->throughput = 27 * 1000 * 1000;
8787
8788 range->max_qual.qual = 100;
8789 /* TODO: Find real max RSSI and stick here */
8790 range->max_qual.level = 0;
8791 range->max_qual.noise = 0;
8792 range->max_qual.updated = 7; /* Updated all three */
8793
8794 range->avg_qual.qual = 70;
8795 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8796 range->avg_qual.level = 0; /* FIXME to real average level */
8797 range->avg_qual.noise = 0;
8798 range->avg_qual.updated = 7; /* Updated all three */
8799 mutex_lock(&priv->mutex);
8800 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8801
8802 for (i = 0; i < range->num_bitrates; i++)
8803 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8804 500000;
8805
8806 range->max_rts = DEFAULT_RTS_THRESHOLD;
8807 range->min_frag = MIN_FRAG_THRESHOLD;
8808 range->max_frag = MAX_FRAG_THRESHOLD;
8809
8810 range->encoding_size[0] = 5;
8811 range->encoding_size[1] = 13;
8812 range->num_encoding_sizes = 2;
8813 range->max_encoding_tokens = WEP_KEYS;
8814
8815 /* Set the Wireless Extension versions */
8816 range->we_version_compiled = WIRELESS_EXT;
8817 range->we_version_source = 18;
8818
8819 i = 0;
8820 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8821 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8822 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8823 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8824 continue;
8825
8826 range->freq[i].i = geo->bg[j].channel;
8827 range->freq[i].m = geo->bg[j].freq * 100000;
8828 range->freq[i].e = 1;
8829 i++;
8830 }
8831 }
8832
8833 if (priv->ieee->mode & IEEE_A) {
8834 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8835 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8836 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8837 continue;
8838
8839 range->freq[i].i = geo->a[j].channel;
8840 range->freq[i].m = geo->a[j].freq * 100000;
8841 range->freq[i].e = 1;
8842 i++;
8843 }
8844 }
8845
8846 range->num_channels = i;
8847 range->num_frequency = i;
8848
8849 mutex_unlock(&priv->mutex);
8850
8851 /* Event capability (kernel + driver) */
8852 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8853 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8854 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8855 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8856 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8857
8858 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8859 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8860
8861 IPW_DEBUG_WX("GET Range\n");
8862 return 0;
8863 }
8864
8865 static int ipw_wx_set_wap(struct net_device *dev,
8866 struct iw_request_info *info,
8867 union iwreq_data *wrqu, char *extra)
8868 {
8869 struct ipw_priv *priv = ieee80211_priv(dev);
8870
8871 static const unsigned char any[] = {
8872 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8873 };
8874 static const unsigned char off[] = {
8875 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8876 };
8877
8878 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8879 return -EINVAL;
8880 mutex_lock(&priv->mutex);
8881 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8882 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8883 /* we disable mandatory BSSID association */
8884 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8885 priv->config &= ~CFG_STATIC_BSSID;
8886 IPW_DEBUG_ASSOC("Attempting to associate with new "
8887 "parameters.\n");
8888 ipw_associate(priv);
8889 mutex_unlock(&priv->mutex);
8890 return 0;
8891 }
8892
8893 priv->config |= CFG_STATIC_BSSID;
8894 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8895 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8896 mutex_unlock(&priv->mutex);
8897 return 0;
8898 }
8899
8900 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8901 MAC_ARG(wrqu->ap_addr.sa_data));
8902
8903 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8904
8905 /* Network configuration changed -- force [re]association */
8906 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8907 if (!ipw_disassociate(priv))
8908 ipw_associate(priv);
8909
8910 mutex_unlock(&priv->mutex);
8911 return 0;
8912 }
8913
8914 static int ipw_wx_get_wap(struct net_device *dev,
8915 struct iw_request_info *info,
8916 union iwreq_data *wrqu, char *extra)
8917 {
8918 struct ipw_priv *priv = ieee80211_priv(dev);
8919 /* If we are associated, trying to associate, or have a statically
8920 * configured BSSID then return that; otherwise return ANY */
8921 mutex_lock(&priv->mutex);
8922 if (priv->config & CFG_STATIC_BSSID ||
8923 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8924 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8925 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8926 } else
8927 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8928
8929 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8930 MAC_ARG(wrqu->ap_addr.sa_data));
8931 mutex_unlock(&priv->mutex);
8932 return 0;
8933 }
8934
8935 static int ipw_wx_set_essid(struct net_device *dev,
8936 struct iw_request_info *info,
8937 union iwreq_data *wrqu, char *extra)
8938 {
8939 struct ipw_priv *priv = ieee80211_priv(dev);
8940 int length;
8941
8942 mutex_lock(&priv->mutex);
8943
8944 if (!wrqu->essid.flags)
8945 {
8946 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8947 ipw_disassociate(priv);
8948 priv->config &= ~CFG_STATIC_ESSID;
8949 ipw_associate(priv);
8950 mutex_unlock(&priv->mutex);
8951 return 0;
8952 }
8953
8954 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8955
8956 priv->config |= CFG_STATIC_ESSID;
8957
8958 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8959 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8960 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8961 mutex_unlock(&priv->mutex);
8962 return 0;
8963 }
8964
8965 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
8966 length);
8967
8968 priv->essid_len = length;
8969 memcpy(priv->essid, extra, priv->essid_len);
8970
8971 /* Network configuration changed -- force [re]association */
8972 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8973 if (!ipw_disassociate(priv))
8974 ipw_associate(priv);
8975
8976 mutex_unlock(&priv->mutex);
8977 return 0;
8978 }
8979
8980 static int ipw_wx_get_essid(struct net_device *dev,
8981 struct iw_request_info *info,
8982 union iwreq_data *wrqu, char *extra)
8983 {
8984 struct ipw_priv *priv = ieee80211_priv(dev);
8985
8986 /* If we are associated, trying to associate, or have a statically
8987 * configured ESSID then return that; otherwise return ANY */
8988 mutex_lock(&priv->mutex);
8989 if (priv->config & CFG_STATIC_ESSID ||
8990 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8991 IPW_DEBUG_WX("Getting essid: '%s'\n",
8992 escape_essid(priv->essid, priv->essid_len));
8993 memcpy(extra, priv->essid, priv->essid_len);
8994 wrqu->essid.length = priv->essid_len;
8995 wrqu->essid.flags = 1; /* active */
8996 } else {
8997 IPW_DEBUG_WX("Getting essid: ANY\n");
8998 wrqu->essid.length = 0;
8999 wrqu->essid.flags = 0; /* active */
9000 }
9001 mutex_unlock(&priv->mutex);
9002 return 0;
9003 }
9004
9005 static int ipw_wx_set_nick(struct net_device *dev,
9006 struct iw_request_info *info,
9007 union iwreq_data *wrqu, char *extra)
9008 {
9009 struct ipw_priv *priv = ieee80211_priv(dev);
9010
9011 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9012 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9013 return -E2BIG;
9014 mutex_lock(&priv->mutex);
9015 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9016 memset(priv->nick, 0, sizeof(priv->nick));
9017 memcpy(priv->nick, extra, wrqu->data.length);
9018 IPW_DEBUG_TRACE("<<\n");
9019 mutex_unlock(&priv->mutex);
9020 return 0;
9021
9022 }
9023
9024 static int ipw_wx_get_nick(struct net_device *dev,
9025 struct iw_request_info *info,
9026 union iwreq_data *wrqu, char *extra)
9027 {
9028 struct ipw_priv *priv = ieee80211_priv(dev);
9029 IPW_DEBUG_WX("Getting nick\n");
9030 mutex_lock(&priv->mutex);
9031 wrqu->data.length = strlen(priv->nick);
9032 memcpy(extra, priv->nick, wrqu->data.length);
9033 wrqu->data.flags = 1; /* active */
9034 mutex_unlock(&priv->mutex);
9035 return 0;
9036 }
9037
9038 static int ipw_wx_set_sens(struct net_device *dev,
9039 struct iw_request_info *info,
9040 union iwreq_data *wrqu, char *extra)
9041 {
9042 struct ipw_priv *priv = ieee80211_priv(dev);
9043 int err = 0;
9044
9045 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9046 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9047 mutex_lock(&priv->mutex);
9048
9049 if (wrqu->sens.fixed == 0)
9050 {
9051 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9052 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9053 goto out;
9054 }
9055 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9056 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9057 err = -EINVAL;
9058 goto out;
9059 }
9060
9061 priv->roaming_threshold = wrqu->sens.value;
9062 priv->disassociate_threshold = 3*wrqu->sens.value;
9063 out:
9064 mutex_unlock(&priv->mutex);
9065 return err;
9066 }
9067
9068 static int ipw_wx_get_sens(struct net_device *dev,
9069 struct iw_request_info *info,
9070 union iwreq_data *wrqu, char *extra)
9071 {
9072 struct ipw_priv *priv = ieee80211_priv(dev);
9073 mutex_lock(&priv->mutex);
9074 wrqu->sens.fixed = 1;
9075 wrqu->sens.value = priv->roaming_threshold;
9076 mutex_unlock(&priv->mutex);
9077
9078 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9079 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9080
9081 return 0;
9082 }
9083
9084 static int ipw_wx_set_rate(struct net_device *dev,
9085 struct iw_request_info *info,
9086 union iwreq_data *wrqu, char *extra)
9087 {
9088 /* TODO: We should use semaphores or locks for access to priv */
9089 struct ipw_priv *priv = ieee80211_priv(dev);
9090 u32 target_rate = wrqu->bitrate.value;
9091 u32 fixed, mask;
9092
9093 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9094 /* value = X, fixed = 1 means only rate X */
9095 /* value = X, fixed = 0 means all rates lower equal X */
9096
9097 if (target_rate == -1) {
9098 fixed = 0;
9099 mask = IEEE80211_DEFAULT_RATES_MASK;
9100 /* Now we should reassociate */
9101 goto apply;
9102 }
9103
9104 mask = 0;
9105 fixed = wrqu->bitrate.fixed;
9106
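	/* In the rate walk below, fixed == 0 adds every supported rate at or
	 * below the target to the mask, while fixed == 1 keeps only the bit
	 * for the exact matching rate. */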
9107 if (target_rate == 1000000 || !fixed)
9108 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9109 if (target_rate == 1000000)
9110 goto apply;
9111
9112 if (target_rate == 2000000 || !fixed)
9113 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9114 if (target_rate == 2000000)
9115 goto apply;
9116
9117 if (target_rate == 5500000 || !fixed)
9118 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9119 if (target_rate == 5500000)
9120 goto apply;
9121
9122 if (target_rate == 6000000 || !fixed)
9123 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9124 if (target_rate == 6000000)
9125 goto apply;
9126
9127 if (target_rate == 9000000 || !fixed)
9128 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9129 if (target_rate == 9000000)
9130 goto apply;
9131
9132 if (target_rate == 11000000 || !fixed)
9133 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9134 if (target_rate == 11000000)
9135 goto apply;
9136
9137 if (target_rate == 12000000 || !fixed)
9138 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9139 if (target_rate == 12000000)
9140 goto apply;
9141
9142 if (target_rate == 18000000 || !fixed)
9143 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9144 if (target_rate == 18000000)
9145 goto apply;
9146
9147 if (target_rate == 24000000 || !fixed)
9148 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9149 if (target_rate == 24000000)
9150 goto apply;
9151
9152 if (target_rate == 36000000 || !fixed)
9153 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9154 if (target_rate == 36000000)
9155 goto apply;
9156
9157 if (target_rate == 48000000 || !fixed)
9158 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9159 if (target_rate == 48000000)
9160 goto apply;
9161
9162 if (target_rate == 54000000 || !fixed)
9163 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9164 if (target_rate == 54000000)
9165 goto apply;
9166
9167 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9168 return -EINVAL;
9169
9170 apply:
9171 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9172 mask, fixed ? "fixed" : "sub-rates");
9173 mutex_lock(&priv->mutex);
9174 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9175 priv->config &= ~CFG_FIXED_RATE;
9176 ipw_set_fixed_rate(priv, priv->ieee->mode);
9177 } else
9178 priv->config |= CFG_FIXED_RATE;
9179
9180 if (priv->rates_mask == mask) {
9181 IPW_DEBUG_WX("Mask set to current mask.\n");
9182 mutex_unlock(&priv->mutex);
9183 return 0;
9184 }
9185
9186 priv->rates_mask = mask;
9187
9188 /* Network configuration changed -- force [re]association */
9189 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9190 if (!ipw_disassociate(priv))
9191 ipw_associate(priv);
9192
9193 mutex_unlock(&priv->mutex);
9194 return 0;
9195 }
9196
9197 static int ipw_wx_get_rate(struct net_device *dev,
9198 struct iw_request_info *info,
9199 union iwreq_data *wrqu, char *extra)
9200 {
9201 struct ipw_priv *priv = ieee80211_priv(dev);
9202 mutex_lock(&priv->mutex);
9203 wrqu->bitrate.value = priv->last_rate;
9204 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9205 mutex_unlock(&priv->mutex);
9206 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9207 return 0;
9208 }
9209
9210 static int ipw_wx_set_rts(struct net_device *dev,
9211 struct iw_request_info *info,
9212 union iwreq_data *wrqu, char *extra)
9213 {
9214 struct ipw_priv *priv = ieee80211_priv(dev);
9215 mutex_lock(&priv->mutex);
9216 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9217 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9218 else {
9219 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9220 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9221 mutex_unlock(&priv->mutex);
9222 return -EINVAL;
9223 }
9224 priv->rts_threshold = wrqu->rts.value;
9225 }
9226
9227 ipw_send_rts_threshold(priv, priv->rts_threshold);
9228 mutex_unlock(&priv->mutex);
9229 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9230 return 0;
9231 }
9232
9233 static int ipw_wx_get_rts(struct net_device *dev,
9234 struct iw_request_info *info,
9235 union iwreq_data *wrqu, char *extra)
9236 {
9237 struct ipw_priv *priv = ieee80211_priv(dev);
9238 mutex_lock(&priv->mutex);
9239 wrqu->rts.value = priv->rts_threshold;
9240 wrqu->rts.fixed = 0; /* no auto select */
9241 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9242 mutex_unlock(&priv->mutex);
9243 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9244 return 0;
9245 }
9246
9247 static int ipw_wx_set_txpow(struct net_device *dev,
9248 struct iw_request_info *info,
9249 union iwreq_data *wrqu, char *extra)
9250 {
9251 struct ipw_priv *priv = ieee80211_priv(dev);
9252 int err = 0;
9253
9254 mutex_lock(&priv->mutex);
9255 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9256 err = -EINPROGRESS;
9257 goto out;
9258 }
9259
9260 if (!wrqu->power.fixed)
9261 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9262
9263 if (wrqu->power.flags != IW_TXPOW_DBM) {
9264 err = -EINVAL;
9265 goto out;
9266 }
9267
9268 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9269 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9270 err = -EINVAL;
9271 goto out;
9272 }
9273
9274 priv->tx_power = wrqu->power.value;
9275 err = ipw_set_tx_power(priv);
9276 out:
9277 mutex_unlock(&priv->mutex);
9278 return err;
9279 }
9280
9281 static int ipw_wx_get_txpow(struct net_device *dev,
9282 struct iw_request_info *info,
9283 union iwreq_data *wrqu, char *extra)
9284 {
9285 struct ipw_priv *priv = ieee80211_priv(dev);
9286 mutex_lock(&priv->mutex);
9287 wrqu->power.value = priv->tx_power;
9288 wrqu->power.fixed = 1;
9289 wrqu->power.flags = IW_TXPOW_DBM;
9290 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9291 mutex_unlock(&priv->mutex);
9292
9293 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9294 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9295
9296 return 0;
9297 }
9298
9299 static int ipw_wx_set_frag(struct net_device *dev,
9300 struct iw_request_info *info,
9301 union iwreq_data *wrqu, char *extra)
9302 {
9303 struct ipw_priv *priv = ieee80211_priv(dev);
9304 mutex_lock(&priv->mutex);
9305 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9306 priv->ieee->fts = DEFAULT_FTS;
9307 else {
9308 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9309 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9310 mutex_unlock(&priv->mutex);
9311 return -EINVAL;
9312 }
9313
9314 priv->ieee->fts = wrqu->frag.value & ~0x1;
9315 }
9316
9317 ipw_send_frag_threshold(priv, wrqu->frag.value);
9318 mutex_unlock(&priv->mutex);
9319 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9320 return 0;
9321 }
9322
9323 static int ipw_wx_get_frag(struct net_device *dev,
9324 struct iw_request_info *info,
9325 union iwreq_data *wrqu, char *extra)
9326 {
9327 struct ipw_priv *priv = ieee80211_priv(dev);
9328 mutex_lock(&priv->mutex);
9329 wrqu->frag.value = priv->ieee->fts;
9330 wrqu->frag.fixed = 0; /* no auto select */
9331 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9332 mutex_unlock(&priv->mutex);
9333 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9334
9335 return 0;
9336 }
9337
9338 static int ipw_wx_set_retry(struct net_device *dev,
9339 struct iw_request_info *info,
9340 union iwreq_data *wrqu, char *extra)
9341 {
9342 struct ipw_priv *priv = ieee80211_priv(dev);
9343
9344 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9345 return -EINVAL;
9346
9347 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9348 return 0;
9349
9350 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9351 return -EINVAL;
9352
9353 mutex_lock(&priv->mutex);
9354 if (wrqu->retry.flags & IW_RETRY_SHORT)
9355 priv->short_retry_limit = (u8) wrqu->retry.value;
9356 else if (wrqu->retry.flags & IW_RETRY_LONG)
9357 priv->long_retry_limit = (u8) wrqu->retry.value;
9358 else {
9359 priv->short_retry_limit = (u8) wrqu->retry.value;
9360 priv->long_retry_limit = (u8) wrqu->retry.value;
9361 }
9362
9363 ipw_send_retry_limit(priv, priv->short_retry_limit,
9364 priv->long_retry_limit);
9365 mutex_unlock(&priv->mutex);
9366 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9367 priv->short_retry_limit, priv->long_retry_limit);
9368 return 0;
9369 }
9370
9371 static int ipw_wx_get_retry(struct net_device *dev,
9372 struct iw_request_info *info,
9373 union iwreq_data *wrqu, char *extra)
9374 {
9375 struct ipw_priv *priv = ieee80211_priv(dev);
9376
9377 mutex_lock(&priv->mutex);
9378 wrqu->retry.disabled = 0;
9379
9380 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9381 mutex_unlock(&priv->mutex);
9382 return -EINVAL;
9383 }
9384
9385 if (wrqu->retry.flags & IW_RETRY_LONG) {
9386 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9387 wrqu->retry.value = priv->long_retry_limit;
9388 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9389 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9390 wrqu->retry.value = priv->short_retry_limit;
9391 } else {
9392 wrqu->retry.flags = IW_RETRY_LIMIT;
9393 wrqu->retry.value = priv->short_retry_limit;
9394 }
9395 mutex_unlock(&priv->mutex);
9396
9397 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9398
9399 return 0;
9400 }
9401
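/* Build and send a directed scan for the given ESSID.  If RF kill is
 * active the scan is deferred via STATUS_SCAN_PENDING; if a scan is
 * already in progress we return -EAGAIN instead of sleeping, since the
 * caller may hold rtnl_lock. */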
9402 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9403 int essid_len)
9404 {
9405 struct ipw_scan_request_ext scan;
9406 int err = 0, scan_type;
9407
9408 if (!(priv->status & STATUS_INIT) ||
9409 (priv->status & STATUS_EXIT_PENDING))
9410 return 0;
9411
9412 mutex_lock(&priv->mutex);
9413
9414 if (priv->status & STATUS_RF_KILL_MASK) {
9415 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9416 priv->status |= STATUS_SCAN_PENDING;
9417 goto done;
9418 }
9419
9420 IPW_DEBUG_HC("starting request direct scan!\n");
9421
9422 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9423 /* We should not sleep here; otherwise we will block most
9424 * of the system (for instance, we hold rtnl_lock when we
9425 * get here).
9426 */
9427 err = -EAGAIN;
9428 goto done;
9429 }
9430 memset(&scan, 0, sizeof(scan));
9431
9432 if (priv->config & CFG_SPEED_SCAN)
9433 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9434 cpu_to_le16(30);
9435 else
9436 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9437 cpu_to_le16(20);
9438
9439 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9440 cpu_to_le16(20);
9441 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9442 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9443
9444 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9445
9446 err = ipw_send_ssid(priv, essid, essid_len);
9447 if (err) {
9448 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9449 goto done;
9450 }
9451 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9452
9453 ipw_add_scan_channels(priv, &scan, scan_type);
9454
9455 err = ipw_send_scan_request_ext(priv, &scan);
9456 if (err) {
9457 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9458 goto done;
9459 }
9460
9461 priv->status |= STATUS_SCANNING;
9462
9463 done:
9464 mutex_unlock(&priv->mutex);
9465 return err;
9466 }
9467
9468 static int ipw_wx_set_scan(struct net_device *dev,
9469 struct iw_request_info *info,
9470 union iwreq_data *wrqu, char *extra)
9471 {
9472 struct ipw_priv *priv = ieee80211_priv(dev);
9473 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9474
9475 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9476 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9477 ipw_request_direct_scan(priv, req->essid,
9478 req->essid_len);
9479 return 0;
9480 }
9481 if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9482 queue_work(priv->workqueue,
9483 &priv->request_passive_scan);
9484 return 0;
9485 }
9486 }
9487
9488 IPW_DEBUG_WX("Start scan\n");
9489
9490 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9491
9492 return 0;
9493 }
9494
9495 static int ipw_wx_get_scan(struct net_device *dev,
9496 struct iw_request_info *info,
9497 union iwreq_data *wrqu, char *extra)
9498 {
9499 struct ipw_priv *priv = ieee80211_priv(dev);
9500 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9501 }
9502
9503 static int ipw_wx_set_encode(struct net_device *dev,
9504 struct iw_request_info *info,
9505 union iwreq_data *wrqu, char *key)
9506 {
9507 struct ipw_priv *priv = ieee80211_priv(dev);
9508 int ret;
9509 u32 cap = priv->capability;
9510
9511 mutex_lock(&priv->mutex);
9512 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9513
9514 /* In IBSS mode, we need to notify the firmware to update
9515 	 * the beacon info after the capability has changed. */
9516 if (cap != priv->capability &&
9517 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9518 priv->status & STATUS_ASSOCIATED)
9519 ipw_disassociate(priv);
9520
9521 mutex_unlock(&priv->mutex);
9522 return ret;
9523 }
9524
9525 static int ipw_wx_get_encode(struct net_device *dev,
9526 struct iw_request_info *info,
9527 union iwreq_data *wrqu, char *key)
9528 {
9529 struct ipw_priv *priv = ieee80211_priv(dev);
9530 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9531 }
9532
9533 static int ipw_wx_set_power(struct net_device *dev,
9534 struct iw_request_info *info,
9535 union iwreq_data *wrqu, char *extra)
9536 {
9537 struct ipw_priv *priv = ieee80211_priv(dev);
9538 int err;
9539 mutex_lock(&priv->mutex);
9540 if (wrqu->power.disabled) {
9541 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9542 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9543 if (err) {
9544 IPW_DEBUG_WX("failed setting power mode.\n");
9545 mutex_unlock(&priv->mutex);
9546 return err;
9547 }
9548 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9549 mutex_unlock(&priv->mutex);
9550 return 0;
9551 }
9552
9553 switch (wrqu->power.flags & IW_POWER_MODE) {
9554 case IW_POWER_ON: /* If not specified */
9555 case IW_POWER_MODE: /* If set all mask */
9556 	case IW_POWER_ALL_R:	/* If explicitly requesting all */
9557 break;
9558 default: /* Otherwise we don't support it */
9559 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9560 wrqu->power.flags);
9561 mutex_unlock(&priv->mutex);
9562 return -EOPNOTSUPP;
9563 }
9564
9565 /* If the user hasn't specified a power management mode yet, default
9566 * to BATTERY */
9567 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9568 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9569 else
9570 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9571
9572 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9573 if (err) {
9574 IPW_DEBUG_WX("failed setting power mode.\n");
9575 mutex_unlock(&priv->mutex);
9576 return err;
9577 }
9578
9579 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9580 mutex_unlock(&priv->mutex);
9581 return 0;
9582 }
9583
9584 static int ipw_wx_get_power(struct net_device *dev,
9585 struct iw_request_info *info,
9586 union iwreq_data *wrqu, char *extra)
9587 {
9588 struct ipw_priv *priv = ieee80211_priv(dev);
9589 mutex_lock(&priv->mutex);
9590 if (!(priv->power_mode & IPW_POWER_ENABLED))
9591 wrqu->power.disabled = 1;
9592 else
9593 wrqu->power.disabled = 0;
9594
9595 mutex_unlock(&priv->mutex);
9596 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9597
9598 return 0;
9599 }
9600
9601 static int ipw_wx_set_powermode(struct net_device *dev,
9602 struct iw_request_info *info,
9603 union iwreq_data *wrqu, char *extra)
9604 {
9605 struct ipw_priv *priv = ieee80211_priv(dev);
9606 int mode = *(int *)extra;
9607 int err;
9608
9609 mutex_lock(&priv->mutex);
9610 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9611 mode = IPW_POWER_AC;
9612
9613 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9614 err = ipw_send_power_mode(priv, mode);
9615 if (err) {
9616 IPW_DEBUG_WX("failed setting power mode.\n");
9617 mutex_unlock(&priv->mutex);
9618 return err;
9619 }
9620 priv->power_mode = IPW_POWER_ENABLED | mode;
9621 }
9622 mutex_unlock(&priv->mutex);
9623 return 0;
9624 }
9625
9626 #define MAX_WX_STRING 80
9627 static int ipw_wx_get_powermode(struct net_device *dev,
9628 struct iw_request_info *info,
9629 union iwreq_data *wrqu, char *extra)
9630 {
9631 struct ipw_priv *priv = ieee80211_priv(dev);
9632 int level = IPW_POWER_LEVEL(priv->power_mode);
9633 char *p = extra;
9634
9635 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9636
9637 switch (level) {
9638 case IPW_POWER_AC:
9639 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9640 break;
9641 case IPW_POWER_BATTERY:
9642 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9643 break;
9644 default:
9645 p += snprintf(p, MAX_WX_STRING - (p - extra),
9646 "(Timeout %dms, Period %dms)",
9647 timeout_duration[level - 1] / 1000,
9648 period_duration[level - 1] / 1000);
9649 }
9650
9651 if (!(priv->power_mode & IPW_POWER_ENABLED))
9652 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9653
9654 wrqu->data.length = p - extra + 1;
9655
9656 return 0;
9657 }
9658
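/* Private ioctl: translate an IEEE_A/B/G bitmask into band and modulation
 * settings (802.11a only on 2915ABG hardware), rebuild the supported rate
 * set and force a reassociation. */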
9659 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9660 struct iw_request_info *info,
9661 union iwreq_data *wrqu, char *extra)
9662 {
9663 struct ipw_priv *priv = ieee80211_priv(dev);
9664 int mode = *(int *)extra;
9665 u8 band = 0, modulation = 0;
9666
9667 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9668 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9669 return -EINVAL;
9670 }
9671 mutex_lock(&priv->mutex);
9672 if (priv->adapter == IPW_2915ABG) {
9673 priv->ieee->abg_true = 1;
9674 if (mode & IEEE_A) {
9675 band |= IEEE80211_52GHZ_BAND;
9676 modulation |= IEEE80211_OFDM_MODULATION;
9677 } else
9678 priv->ieee->abg_true = 0;
9679 } else {
9680 if (mode & IEEE_A) {
9681 IPW_WARNING("Attempt to set 2200BG into "
9682 "802.11a mode\n");
9683 mutex_unlock(&priv->mutex);
9684 return -EINVAL;
9685 }
9686
9687 priv->ieee->abg_true = 0;
9688 }
9689
9690 if (mode & IEEE_B) {
9691 band |= IEEE80211_24GHZ_BAND;
9692 modulation |= IEEE80211_CCK_MODULATION;
9693 } else
9694 priv->ieee->abg_true = 0;
9695
9696 if (mode & IEEE_G) {
9697 band |= IEEE80211_24GHZ_BAND;
9698 modulation |= IEEE80211_OFDM_MODULATION;
9699 } else
9700 priv->ieee->abg_true = 0;
9701
9702 priv->ieee->mode = mode;
9703 priv->ieee->freq_band = band;
9704 priv->ieee->modulation = modulation;
9705 init_supported_rates(priv, &priv->rates);
9706
9707 /* Network configuration changed -- force [re]association */
9708 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9709 if (!ipw_disassociate(priv)) {
9710 ipw_send_supported_rates(priv, &priv->rates);
9711 ipw_associate(priv);
9712 }
9713
9714 /* Update the band LEDs */
9715 ipw_led_band_on(priv);
9716
9717 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9718 mode & IEEE_A ? 'a' : '.',
9719 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9720 mutex_unlock(&priv->mutex);
9721 return 0;
9722 }
9723
9724 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9725 struct iw_request_info *info,
9726 union iwreq_data *wrqu, char *extra)
9727 {
9728 struct ipw_priv *priv = ieee80211_priv(dev);
9729 mutex_lock(&priv->mutex);
9730 switch (priv->ieee->mode) {
9731 case IEEE_A:
9732 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9733 break;
9734 case IEEE_B:
9735 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9736 break;
9737 case IEEE_A | IEEE_B:
9738 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9739 break;
9740 case IEEE_G:
9741 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9742 break;
9743 case IEEE_A | IEEE_G:
9744 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9745 break;
9746 case IEEE_B | IEEE_G:
9747 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9748 break;
9749 case IEEE_A | IEEE_B | IEEE_G:
9750 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9751 break;
9752 default:
9753 strncpy(extra, "unknown", MAX_WX_STRING);
9754 break;
9755 }
9756
9757 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9758
9759 wrqu->data.length = strlen(extra) + 1;
9760 mutex_unlock(&priv->mutex);
9761
9762 return 0;
9763 }
9764
9765 static int ipw_wx_set_preamble(struct net_device *dev,
9766 struct iw_request_info *info,
9767 union iwreq_data *wrqu, char *extra)
9768 {
9769 struct ipw_priv *priv = ieee80211_priv(dev);
9770 int mode = *(int *)extra;
9771 mutex_lock(&priv->mutex);
9772 /* Switching from SHORT -> LONG requires a disassociation */
9773 if (mode == 1) {
9774 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9775 priv->config |= CFG_PREAMBLE_LONG;
9776
9777 /* Network configuration changed -- force [re]association */
9778 IPW_DEBUG_ASSOC
9779 ("[re]association triggered due to preamble change.\n");
9780 if (!ipw_disassociate(priv))
9781 ipw_associate(priv);
9782 }
9783 goto done;
9784 }
9785
9786 if (mode == 0) {
9787 priv->config &= ~CFG_PREAMBLE_LONG;
9788 goto done;
9789 }
9790 mutex_unlock(&priv->mutex);
9791 return -EINVAL;
9792
9793 done:
9794 mutex_unlock(&priv->mutex);
9795 return 0;
9796 }
9797
9798 static int ipw_wx_get_preamble(struct net_device *dev,
9799 struct iw_request_info *info,
9800 union iwreq_data *wrqu, char *extra)
9801 {
9802 struct ipw_priv *priv = ieee80211_priv(dev);
9803 mutex_lock(&priv->mutex);
9804 if (priv->config & CFG_PREAMBLE_LONG)
9805 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9806 else
9807 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9808 mutex_unlock(&priv->mutex);
9809 return 0;
9810 }
9811
9812 #ifdef CONFIG_IPW2200_MONITOR
9813 static int ipw_wx_set_monitor(struct net_device *dev,
9814 struct iw_request_info *info,
9815 union iwreq_data *wrqu, char *extra)
9816 {
9817 struct ipw_priv *priv = ieee80211_priv(dev);
9818 int *parms = (int *)extra;
9819 int enable = (parms[0] > 0);
9820 mutex_lock(&priv->mutex);
9821 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9822 if (enable) {
9823 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9824 #ifdef CONFIG_IPW2200_RADIOTAP
9825 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9826 #else
9827 priv->net_dev->type = ARPHRD_IEEE80211;
9828 #endif
9829 queue_work(priv->workqueue, &priv->adapter_restart);
9830 }
9831
9832 ipw_set_channel(priv, parms[1]);
9833 } else {
9834 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9835 mutex_unlock(&priv->mutex);
9836 return 0;
9837 }
9838 priv->net_dev->type = ARPHRD_ETHER;
9839 queue_work(priv->workqueue, &priv->adapter_restart);
9840 }
9841 mutex_unlock(&priv->mutex);
9842 return 0;
9843 }
9844
9845 #endif /* CONFIG_IPW2200_MONITOR */
9846
9847 static int ipw_wx_reset(struct net_device *dev,
9848 struct iw_request_info *info,
9849 union iwreq_data *wrqu, char *extra)
9850 {
9851 struct ipw_priv *priv = ieee80211_priv(dev);
9852 IPW_DEBUG_WX("RESET\n");
9853 queue_work(priv->workqueue, &priv->adapter_restart);
9854 return 0;
9855 }
9856
9857 static int ipw_wx_sw_reset(struct net_device *dev,
9858 struct iw_request_info *info,
9859 union iwreq_data *wrqu, char *extra)
9860 {
9861 struct ipw_priv *priv = ieee80211_priv(dev);
9862 union iwreq_data wrqu_sec = {
9863 .encoding = {
9864 .flags = IW_ENCODE_DISABLED,
9865 },
9866 };
9867 int ret;
9868
9869 IPW_DEBUG_WX("SW_RESET\n");
9870
9871 mutex_lock(&priv->mutex);
9872
9873 ret = ipw_sw_reset(priv, 2);
9874 if (!ret) {
9875 free_firmware();
9876 ipw_adapter_restart(priv);
9877 }
9878
9879 /* The SW reset bit might have been toggled on by the 'disable'
9880 * module parameter, so take appropriate action */
9881 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9882
9883 mutex_unlock(&priv->mutex);
9884 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9885 mutex_lock(&priv->mutex);
9886
9887 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9888 /* Configuration likely changed -- force [re]association */
9889 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9890 "reset.\n");
9891 if (!ipw_disassociate(priv))
9892 ipw_associate(priv);
9893 }
9894
9895 mutex_unlock(&priv->mutex);
9896
9897 return 0;
9898 }
9899
9900 /* Rebase the WE IOCTLs to zero for the handler array */
9901 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9902 static iw_handler ipw_wx_handlers[] = {
9903 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9904 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9905 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9906 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9907 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9908 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9909 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9910 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9911 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9912 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9913 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9914 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9915 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9916 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9917 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9918 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9919 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9920 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9921 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9922 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9923 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9924 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9925 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9926 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9927 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9928 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9929 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9930 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9931 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9932 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9933 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9934 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9935 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9936 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9937 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9938 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9939 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9940 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9941 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9942 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9943 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9944 };
9945
9946 enum {
9947 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9948 IPW_PRIV_GET_POWER,
9949 IPW_PRIV_SET_MODE,
9950 IPW_PRIV_GET_MODE,
9951 IPW_PRIV_SET_PREAMBLE,
9952 IPW_PRIV_GET_PREAMBLE,
9953 IPW_PRIV_RESET,
9954 IPW_PRIV_SW_RESET,
9955 #ifdef CONFIG_IPW2200_MONITOR
9956 IPW_PRIV_SET_MONITOR,
9957 #endif
9958 };
9959
9960 static struct iw_priv_args ipw_priv_args[] = {
9961 {
9962 .cmd = IPW_PRIV_SET_POWER,
9963 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9964 .name = "set_power"},
9965 {
9966 .cmd = IPW_PRIV_GET_POWER,
9967 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9968 .name = "get_power"},
9969 {
9970 .cmd = IPW_PRIV_SET_MODE,
9971 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9972 .name = "set_mode"},
9973 {
9974 .cmd = IPW_PRIV_GET_MODE,
9975 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9976 .name = "get_mode"},
9977 {
9978 .cmd = IPW_PRIV_SET_PREAMBLE,
9979 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9980 .name = "set_preamble"},
9981 {
9982 .cmd = IPW_PRIV_GET_PREAMBLE,
9983 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9984 .name = "get_preamble"},
9985 {
9986 IPW_PRIV_RESET,
9987 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9988 {
9989 IPW_PRIV_SW_RESET,
9990 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9991 #ifdef CONFIG_IPW2200_MONITOR
9992 {
9993 IPW_PRIV_SET_MONITOR,
9994 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9995 #endif /* CONFIG_IPW2200_MONITOR */
9996 };
9997
9998 static iw_handler ipw_priv_handler[] = {
9999 ipw_wx_set_powermode,
10000 ipw_wx_get_powermode,
10001 ipw_wx_set_wireless_mode,
10002 ipw_wx_get_wireless_mode,
10003 ipw_wx_set_preamble,
10004 ipw_wx_get_preamble,
10005 ipw_wx_reset,
10006 ipw_wx_sw_reset,
10007 #ifdef CONFIG_IPW2200_MONITOR
10008 ipw_wx_set_monitor,
10009 #endif
10010 };
10011
10012 static struct iw_handler_def ipw_wx_handler_def = {
10013 .standard = ipw_wx_handlers,
10014 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10015 .num_private = ARRAY_SIZE(ipw_priv_handler),
10016 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10017 .private = ipw_priv_handler,
10018 .private_args = ipw_priv_args,
10019 .get_wireless_stats = ipw_get_wireless_stats,
10020 };
10021
10022 /*
10023 * Get wireless statistics.
10024 * Called by /proc/net/wireless
10025 * Also called by SIOCGIWSTATS
10026 */
10027 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10028 {
10029 struct ipw_priv *priv = ieee80211_priv(dev);
10030 struct iw_statistics *wstats;
10031
10032 wstats = &priv->wstats;
10033
10034 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10035 * netdev->get_wireless_stats seems to be called before fw is
10036 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10037 	 * and associated; if not associated, the values are all meaningless
10038 	 * anyway, so zero them and mark them all INVALID */
10039 if (!(priv->status & STATUS_ASSOCIATED)) {
10040 wstats->miss.beacon = 0;
10041 wstats->discard.retries = 0;
10042 wstats->qual.qual = 0;
10043 wstats->qual.level = 0;
10044 wstats->qual.noise = 0;
10045 wstats->qual.updated = 7;
10046 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10047 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10048 return wstats;
10049 }
10050
10051 wstats->qual.qual = priv->quality;
10052 wstats->qual.level = priv->exp_avg_rssi;
10053 wstats->qual.noise = priv->exp_avg_noise;
10054 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10055 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10056
10057 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10058 wstats->discard.retries = priv->last_tx_failures;
10059 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10060
10061 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10062 goto fail_get_ordinal;
10063 wstats->discard.retries += tx_retry; */
10064
10065 return wstats;
10066 }
10067
10068 /* net device stuff */
10069
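/* Fill in the power-on defaults for the firmware system configuration
 * block.  The antenna diversity setting comes from the 'antenna' module
 * parameter, clamped to a valid value. */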
10070 static void init_sys_config(struct ipw_sys_config *sys_config)
10071 {
10072 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10073 sys_config->bt_coexistence = 0;
10074 sys_config->answer_broadcast_ssid_probe = 0;
10075 sys_config->accept_all_data_frames = 0;
10076 sys_config->accept_non_directed_frames = 1;
10077 sys_config->exclude_unicast_unencrypted = 0;
10078 sys_config->disable_unicast_decryption = 1;
10079 sys_config->exclude_multicast_unencrypted = 0;
10080 sys_config->disable_multicast_decryption = 1;
10081 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10082 antenna = CFG_SYS_ANTENNA_BOTH;
10083 sys_config->antenna_diversity = antenna;
10084 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10085 sys_config->dot11g_auto_detection = 0;
10086 sys_config->enable_cts_to_self = 0;
10087 sys_config->bt_coexist_collision_thr = 0;
10088 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10089 sys_config->silence_threshold = 0x1e;
10090 }
10091
10092 static int ipw_net_open(struct net_device *dev)
10093 {
10094 struct ipw_priv *priv = ieee80211_priv(dev);
10095 IPW_DEBUG_INFO("dev->open\n");
10096 /* we should be verifying the device is ready to be opened */
10097 mutex_lock(&priv->mutex);
10098 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10099 (priv->status & STATUS_ASSOCIATED))
10100 netif_start_queue(dev);
10101 mutex_unlock(&priv->mutex);
10102 return 0;
10103 }
10104
10105 static int ipw_net_stop(struct net_device *dev)
10106 {
10107 IPW_DEBUG_INFO("dev->close\n");
10108 netif_stop_queue(dev);
10109 return 0;
10110 }
10111
10112 /*
10113 todo:
10114
10115 modify to send one tfd per fragment instead of using chunking; otherwise
10116 we need to heavily modify ieee80211_skb_to_txb.
10117 */
10118
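/* Map one ieee80211_txb onto a TX frame descriptor: look up (or add) the
 * destination station in IBSS mode, set rate/preamble/security flags, and
 * attach each fragment as a DMA-mapped chunk.  Fragments that do not fit
 * in NUM_TFD_CHUNKS - 2 chunks are coalesced into a single skb below. */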
10119 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10120 int pri)
10121 {
10122 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10123 txb->fragments[0]->data;
10124 int i = 0;
10125 struct tfd_frame *tfd;
10126 #ifdef CONFIG_IPW2200_QOS
10127 int tx_id = ipw_get_tx_queue_number(priv, pri);
10128 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10129 #else
10130 struct clx2_tx_queue *txq = &priv->txq[0];
10131 #endif
10132 struct clx2_queue *q = &txq->q;
10133 u8 id, hdr_len, unicast;
10134 u16 remaining_bytes;
10135 int fc;
10136
10137 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10138 switch (priv->ieee->iw_mode) {
10139 case IW_MODE_ADHOC:
10140 unicast = !is_multicast_ether_addr(hdr->addr1);
10141 id = ipw_find_station(priv, hdr->addr1);
10142 if (id == IPW_INVALID_STATION) {
10143 id = ipw_add_station(priv, hdr->addr1);
10144 if (id == IPW_INVALID_STATION) {
10145 IPW_WARNING("Attempt to send data to "
10146 "invalid cell: " MAC_FMT "\n",
10147 MAC_ARG(hdr->addr1));
10148 goto drop;
10149 }
10150 }
10151 break;
10152
10153 case IW_MODE_INFRA:
10154 default:
10155 unicast = !is_multicast_ether_addr(hdr->addr3);
10156 id = 0;
10157 break;
10158 }
10159
10160 tfd = &txq->bd[q->first_empty];
10161 txq->txb[q->first_empty] = txb;
10162 memset(tfd, 0, sizeof(*tfd));
10163 tfd->u.data.station_number = id;
10164
10165 tfd->control_flags.message_type = TX_FRAME_TYPE;
10166 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10167
10168 tfd->u.data.cmd_id = DINO_CMD_TX;
10169 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10170 remaining_bytes = txb->payload_size;
10171
10172 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10173 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10174 else
10175 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10176
10177 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10178 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10179
10180 fc = le16_to_cpu(hdr->frame_ctl);
10181 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10182
10183 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10184
10185 if (likely(unicast))
10186 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10187
10188 if (txb->encrypted && !priv->ieee->host_encrypt) {
10189 switch (priv->ieee->sec.level) {
10190 case SEC_LEVEL_3:
10191 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10192 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10193 /* XXX: ACK flag must be set for CCMP even if it
10194 * is a multicast/broadcast packet, because CCMP
10195 * group communication encrypted by GTK is
10196 * actually done by the AP. */
10197 if (!unicast)
10198 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10199
10200 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10201 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10202 tfd->u.data.key_index = 0;
10203 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10204 break;
10205 case SEC_LEVEL_2:
10206 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10207 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10208 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10209 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10210 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10211 break;
10212 case SEC_LEVEL_1:
10213 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10214 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10215 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10216 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10217 40)
10218 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10219 else
10220 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10221 break;
10222 case SEC_LEVEL_0:
10223 break;
10224 default:
10225 			printk(KERN_ERR "Unknown security level %d\n",
10226 priv->ieee->sec.level);
10227 break;
10228 }
10229 } else
10230 /* No hardware encryption */
10231 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10232
10233 #ifdef CONFIG_IPW2200_QOS
10234 if (fc & IEEE80211_STYPE_QOS_DATA)
10235 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10236 #endif /* CONFIG_IPW2200_QOS */
10237
10238 /* payload */
10239 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10240 txb->nr_frags));
10241 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10242 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10243 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10244 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10245 i, le32_to_cpu(tfd->u.data.num_chunks),
10246 txb->fragments[i]->len - hdr_len);
10247 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10248 i, tfd->u.data.num_chunks,
10249 txb->fragments[i]->len - hdr_len);
10250 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10251 txb->fragments[i]->len - hdr_len);
10252
10253 tfd->u.data.chunk_ptr[i] =
10254 cpu_to_le32(pci_map_single
10255 (priv->pci_dev,
10256 txb->fragments[i]->data + hdr_len,
10257 txb->fragments[i]->len - hdr_len,
10258 PCI_DMA_TODEVICE));
10259 tfd->u.data.chunk_len[i] =
10260 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10261 }
10262
10263 if (i != txb->nr_frags) {
10264 struct sk_buff *skb;
10265 u16 remaining_bytes = 0;
10266 int j;
10267
10268 for (j = i; j < txb->nr_frags; j++)
10269 remaining_bytes += txb->fragments[j]->len - hdr_len;
10270
10271 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10272 remaining_bytes);
10273 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10274 if (skb != NULL) {
10275 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10276 for (j = i; j < txb->nr_frags; j++) {
10277 int size = txb->fragments[j]->len - hdr_len;
10278
10279 printk(KERN_INFO "Adding frag %d %d...\n",
10280 j, size);
10281 memcpy(skb_put(skb, size),
10282 txb->fragments[j]->data + hdr_len, size);
10283 }
10284 dev_kfree_skb_any(txb->fragments[i]);
10285 txb->fragments[i] = skb;
10286 tfd->u.data.chunk_ptr[i] =
10287 cpu_to_le32(pci_map_single
10288 (priv->pci_dev, skb->data,
10289 tfd->u.data.chunk_len[i],
10290 PCI_DMA_TODEVICE));
10291
10292 tfd->u.data.num_chunks =
10293 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10294 1);
10295 }
10296 }
10297
10298 /* kick DMA */
10299 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10300 ipw_write32(priv, q->reg_w, q->first_empty);
10301
10302 if (ipw_queue_space(q) < q->high_mark)
10303 netif_stop_queue(priv->net_dev);
10304
10305 return NETDEV_TX_OK;
10306
10307 drop:
10308 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10309 ieee80211_txb_free(txb);
10310 return NETDEV_TX_OK;
10311 }
10312
10313 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10314 {
10315 struct ipw_priv *priv = ieee80211_priv(dev);
10316 #ifdef CONFIG_IPW2200_QOS
10317 int tx_id = ipw_get_tx_queue_number(priv, pri);
10318 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10319 #else
10320 struct clx2_tx_queue *txq = &priv->txq[0];
10321 #endif /* CONFIG_IPW2200_QOS */
10322
10323 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10324 return 1;
10325
10326 return 0;
10327 }
10328
10329 #ifdef CONFIG_IPW2200_PROMISCUOUS
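/* Mirror an outgoing frame to the promiscuous (rtap) interface: honor the
 * prom_priv->filter settings, prepend a minimal radiotap header carrying
 * only the channel field, and feed the copy to ieee80211_rx(). */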
10330 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10331 struct ieee80211_txb *txb)
10332 {
10333 struct ieee80211_rx_stats dummystats;
10334 struct ieee80211_hdr *hdr;
10335 u8 n;
10336 u16 filter = priv->prom_priv->filter;
10337 int hdr_only = 0;
10338
10339 if (filter & IPW_PROM_NO_TX)
10340 return;
10341
10342 memset(&dummystats, 0, sizeof(dummystats));
10343
10344 	/* Filtering of fragment chains is done against the first fragment */
10345 hdr = (void *)txb->fragments[0]->data;
10346 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10347 if (filter & IPW_PROM_NO_MGMT)
10348 return;
10349 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10350 hdr_only = 1;
10351 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10352 if (filter & IPW_PROM_NO_CTL)
10353 return;
10354 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10355 hdr_only = 1;
10356 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10357 if (filter & IPW_PROM_NO_DATA)
10358 return;
10359 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10360 hdr_only = 1;
10361 }
10362
10363 	for (n = 0; n < txb->nr_frags; ++n) {
10364 struct sk_buff *src = txb->fragments[n];
10365 struct sk_buff *dst;
10366 struct ieee80211_radiotap_header *rt_hdr;
10367 int len;
10368
10369 if (hdr_only) {
10370 hdr = (void *)src->data;
10371 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10372 } else
10373 len = src->len;
10374
10375 dst = alloc_skb(
10376 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10377 if (!dst) continue;
10378
10379 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10380
10381 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10382 rt_hdr->it_pad = 0;
10383 rt_hdr->it_present = 0; /* after all, it's just an idea */
10384 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10385
10386 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10387 ieee80211chan2mhz(priv->channel));
10388 if (priv->channel > 14) /* 802.11a */
10389 *(u16*)skb_put(dst, sizeof(u16)) =
10390 cpu_to_le16(IEEE80211_CHAN_OFDM |
10391 IEEE80211_CHAN_5GHZ);
10392 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10393 *(u16*)skb_put(dst, sizeof(u16)) =
10394 cpu_to_le16(IEEE80211_CHAN_CCK |
10395 IEEE80211_CHAN_2GHZ);
10396 else /* 802.11g */
10397 *(u16*)skb_put(dst, sizeof(u16)) =
10398 cpu_to_le16(IEEE80211_CHAN_OFDM |
10399 IEEE80211_CHAN_2GHZ);
10400
10401 rt_hdr->it_len = dst->len;
10402
10403 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10404
10405 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10406 dev_kfree_skb_any(dst);
10407 }
10408 }
10409 #endif
10410
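/* hard_start_xmit handler: bail out if we are not associated, optionally
 * mirror the frame to the rtap interface, then hand it to ipw_tx_skb()
 * with priv->lock held. */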
10411 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10412 struct net_device *dev, int pri)
10413 {
10414 struct ipw_priv *priv = ieee80211_priv(dev);
10415 unsigned long flags;
10416 int ret;
10417
10418 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10419 spin_lock_irqsave(&priv->lock, flags);
10420
10421 if (!(priv->status & STATUS_ASSOCIATED)) {
10422 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10423 priv->ieee->stats.tx_carrier_errors++;
10424 netif_stop_queue(dev);
10425 goto fail_unlock;
10426 }
10427
10428 #ifdef CONFIG_IPW2200_PROMISCUOUS
10429 if (rtap_iface && netif_running(priv->prom_net_dev))
10430 ipw_handle_promiscuous_tx(priv, txb);
10431 #endif
10432
10433 ret = ipw_tx_skb(priv, txb, pri);
10434 if (ret == NETDEV_TX_OK)
10435 __ipw_led_activity_on(priv);
10436 spin_unlock_irqrestore(&priv->lock, flags);
10437
10438 return ret;
10439
10440 fail_unlock:
10441 spin_unlock_irqrestore(&priv->lock, flags);
10442 return 1;
10443 }
10444
10445 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10446 {
10447 struct ipw_priv *priv = ieee80211_priv(dev);
10448
10449 priv->ieee->stats.tx_packets = priv->tx_packets;
10450 priv->ieee->stats.rx_packets = priv->rx_packets;
10451 return &priv->ieee->stats;
10452 }
10453
10454 static void ipw_net_set_multicast_list(struct net_device *dev)
10455 {
10456
10457 }
10458
10459 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10460 {
10461 struct ipw_priv *priv = ieee80211_priv(dev);
10462 struct sockaddr *addr = p;
10463 if (!is_valid_ether_addr(addr->sa_data))
10464 return -EADDRNOTAVAIL;
10465 mutex_lock(&priv->mutex);
10466 priv->config |= CFG_CUSTOM_MAC;
10467 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10468 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
10469 priv->net_dev->name, MAC_ARG(priv->mac_addr));
10470 queue_work(priv->workqueue, &priv->adapter_restart);
10471 mutex_unlock(&priv->mutex);
10472 return 0;
10473 }
10474
10475 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10476 struct ethtool_drvinfo *info)
10477 {
10478 struct ipw_priv *p = ieee80211_priv(dev);
10479 char vers[64];
10480 char date[32];
10481 u32 len;
10482
10483 strcpy(info->driver, DRV_NAME);
10484 strcpy(info->version, DRV_VERSION);
10485
10486 len = sizeof(vers);
10487 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10488 len = sizeof(date);
10489 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10490
10491 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10492 vers, date);
10493 strcpy(info->bus_info, pci_name(p->pci_dev));
10494 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10495 }
10496
10497 static u32 ipw_ethtool_get_link(struct net_device *dev)
10498 {
10499 struct ipw_priv *priv = ieee80211_priv(dev);
10500 return (priv->status & STATUS_ASSOCIATED) != 0;
10501 }
10502
10503 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10504 {
10505 return IPW_EEPROM_IMAGE_SIZE;
10506 }
10507
10508 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10509 struct ethtool_eeprom *eeprom, u8 * bytes)
10510 {
10511 struct ipw_priv *p = ieee80211_priv(dev);
10512
10513 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10514 return -EINVAL;
10515 mutex_lock(&p->mutex);
10516 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10517 mutex_unlock(&p->mutex);
10518 return 0;
10519 }
10520
10521 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10522 struct ethtool_eeprom *eeprom, u8 * bytes)
10523 {
10524 struct ipw_priv *p = ieee80211_priv(dev);
10525 int i;
10526
10527 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10528 return -EINVAL;
10529 mutex_lock(&p->mutex);
10530 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10531 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10532 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10533 mutex_unlock(&p->mutex);
10534 return 0;
10535 }
10536
10537 static const struct ethtool_ops ipw_ethtool_ops = {
10538 .get_link = ipw_ethtool_get_link,
10539 .get_drvinfo = ipw_ethtool_get_drvinfo,
10540 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10541 .get_eeprom = ipw_ethtool_get_eeprom,
10542 .set_eeprom = ipw_ethtool_set_eeprom,
10543 };
10544
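/* Top-half interrupt handler: read and acknowledge the pending INTA bits,
 * mask further interrupts, cache the status in priv->isr_inta and defer
 * the real work to the IRQ tasklet.  Shared-interrupt and hardware-gone
 * cases return IRQ_NONE. */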
10545 static irqreturn_t ipw_isr(int irq, void *data)
10546 {
10547 struct ipw_priv *priv = data;
10548 u32 inta, inta_mask;
10549
10550 if (!priv)
10551 return IRQ_NONE;
10552
10553 spin_lock(&priv->irq_lock);
10554
10555 if (!(priv->status & STATUS_INT_ENABLED)) {
10556 /* IRQ is disabled */
10557 goto none;
10558 }
10559
10560 inta = ipw_read32(priv, IPW_INTA_RW);
10561 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10562
10563 if (inta == 0xFFFFFFFF) {
10564 /* Hardware disappeared */
10565 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10566 goto none;
10567 }
10568
10569 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10570 /* Shared interrupt */
10571 goto none;
10572 }
10573
10574 /* tell the device to stop sending interrupts */
10575 __ipw_disable_interrupts(priv);
10576
10577 /* ack current interrupts */
10578 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10579 ipw_write32(priv, IPW_INTA_RW, inta);
10580
10581 /* Cache INTA value for our tasklet */
10582 priv->isr_inta = inta;
10583
10584 tasklet_schedule(&priv->irq_tasklet);
10585
10586 spin_unlock(&priv->irq_lock);
10587
10588 return IRQ_HANDLED;
10589 none:
10590 spin_unlock(&priv->irq_lock);
10591 return IRQ_NONE;
10592 }
10593
10594 static void ipw_rf_kill(void *adapter)
10595 {
10596 struct ipw_priv *priv = adapter;
10597 unsigned long flags;
10598
10599 spin_lock_irqsave(&priv->lock, flags);
10600
10601 if (rf_kill_active(priv)) {
10602 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10603 if (priv->workqueue)
10604 queue_delayed_work(priv->workqueue,
10605 &priv->rf_kill, 2 * HZ);
10606 goto exit_unlock;
10607 }
10608
10609 /* RF Kill is now disabled, so bring the device back up */
10610
10611 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10612 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10613 "device\n");
10614
10615 		/* we cannot do an adapter restart while inside an irq lock */
10616 queue_work(priv->workqueue, &priv->adapter_restart);
10617 } else
10618 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10619 "enabled\n");
10620
10621 exit_unlock:
10622 spin_unlock_irqrestore(&priv->lock, flags);
10623 }
10624
10625 static void ipw_bg_rf_kill(struct work_struct *work)
10626 {
10627 struct ipw_priv *priv =
10628 container_of(work, struct ipw_priv, rf_kill.work);
10629 mutex_lock(&priv->mutex);
10630 ipw_rf_kill(priv);
10631 mutex_unlock(&priv->mutex);
10632 }
10633
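/* Association established: reset duplicate-detection state, restart the
 * TX queue, refresh statistics and LEDs, notify user space, and kick off
 * a background scan if CFG_BACKGROUND_SCAN is set. */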
10634 static void ipw_link_up(struct ipw_priv *priv)
10635 {
10636 priv->last_seq_num = -1;
10637 priv->last_frag_num = -1;
10638 priv->last_packet_time = 0;
10639
10640 netif_carrier_on(priv->net_dev);
10641 if (netif_queue_stopped(priv->net_dev)) {
10642 IPW_DEBUG_NOTIF("waking queue\n");
10643 netif_wake_queue(priv->net_dev);
10644 } else {
10645 IPW_DEBUG_NOTIF("starting queue\n");
10646 netif_start_queue(priv->net_dev);
10647 }
10648
10649 cancel_delayed_work(&priv->request_scan);
10650 ipw_reset_stats(priv);
10651 /* Ensure the rate is updated immediately */
10652 priv->last_rate = ipw_get_current_rate(priv);
10653 ipw_gather_stats(priv);
10654 ipw_led_link_up(priv);
10655 notify_wx_assoc_event(priv);
10656
10657 if (priv->config & CFG_BACKGROUND_SCAN)
10658 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10659 }
10660
10661 static void ipw_bg_link_up(struct work_struct *work)
10662 {
10663 struct ipw_priv *priv =
10664 container_of(work, struct ipw_priv, link_up);
10665 mutex_lock(&priv->mutex);
10666 ipw_link_up(priv);
10667 mutex_unlock(&priv->mutex);
10668 }
10669
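/* Association lost: stop the TX queue, cancel pending scan/adhoc/stats
 * work, reset the statistics and queue a fresh scan unless we are
 * shutting down. */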
10670 static void ipw_link_down(struct ipw_priv *priv)
10671 {
10672 ipw_led_link_down(priv);
10673 netif_carrier_off(priv->net_dev);
10674 netif_stop_queue(priv->net_dev);
10675 notify_wx_assoc_event(priv);
10676
10677 /* Cancel any queued work ... */
10678 cancel_delayed_work(&priv->request_scan);
10679 cancel_delayed_work(&priv->adhoc_check);
10680 cancel_delayed_work(&priv->gather_stats);
10681
10682 ipw_reset_stats(priv);
10683
10684 if (!(priv->status & STATUS_EXIT_PENDING)) {
10685 /* Queue up another scan... */
10686 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10687 }
10688 }
10689
10690 static void ipw_bg_link_down(struct work_struct *work)
10691 {
10692 struct ipw_priv *priv =
10693 container_of(work, struct ipw_priv, link_down);
10694 mutex_lock(&priv->mutex);
10695 ipw_link_down(priv);
10696 mutex_unlock(&priv->mutex);
10697 }
10698
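/* Create the driver workqueue and wire up all deferred work items
 * (association, scanning, LED and RF-kill handling, etc.) as well as the
 * interrupt tasklet. */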
10699 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10700 {
10701 int ret = 0;
10702
10703 priv->workqueue = create_workqueue(DRV_NAME);
10704 init_waitqueue_head(&priv->wait_command_queue);
10705 init_waitqueue_head(&priv->wait_state);
10706
10707 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10708 INIT_WORK(&priv->associate, ipw_bg_associate);
10709 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10710 INIT_WORK(&priv->system_config, ipw_system_config);
10711 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10712 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10713 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10714 INIT_WORK(&priv->up, ipw_bg_up);
10715 INIT_WORK(&priv->down, ipw_bg_down);
10716 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10717 INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10718 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10719 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10720 INIT_WORK(&priv->roam, ipw_bg_roam);
10721 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10722 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10723 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10724 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10725 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10726 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10727 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10728
10729 #ifdef CONFIG_IPW2200_QOS
10730 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10731 #endif /* CONFIG_IPW2200_QOS */
10732
10733 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10734 ipw_irq_tasklet, (unsigned long)priv);
10735
10736 return ret;
10737 }
10738
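/* ieee80211 set_security callback: copy the supplied keys, active key,
 * auth mode, security level and enable/encrypt flags into priv->ieee->sec,
 * update the privacy capability bits and mark STATUS_SECURITY_UPDATED. */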
10739 static void shim__set_security(struct net_device *dev,
10740 struct ieee80211_security *sec)
10741 {
10742 struct ipw_priv *priv = ieee80211_priv(dev);
10743 int i;
10744 for (i = 0; i < 4; i++) {
10745 if (sec->flags & (1 << i)) {
10746 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10747 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10748 if (sec->key_sizes[i] == 0)
10749 priv->ieee->sec.flags &= ~(1 << i);
10750 else {
10751 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10752 sec->key_sizes[i]);
10753 priv->ieee->sec.flags |= (1 << i);
10754 }
10755 priv->status |= STATUS_SECURITY_UPDATED;
10756 } else if (sec->level != SEC_LEVEL_1)
10757 priv->ieee->sec.flags &= ~(1 << i);
10758 }
10759
10760 if (sec->flags & SEC_ACTIVE_KEY) {
10761 if (sec->active_key <= 3) {
10762 priv->ieee->sec.active_key = sec->active_key;
10763 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10764 } else
10765 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10766 priv->status |= STATUS_SECURITY_UPDATED;
10767 } else
10768 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10769
10770 if ((sec->flags & SEC_AUTH_MODE) &&
10771 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10772 priv->ieee->sec.auth_mode = sec->auth_mode;
10773 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10774 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10775 priv->capability |= CAP_SHARED_KEY;
10776 else
10777 priv->capability &= ~CAP_SHARED_KEY;
10778 priv->status |= STATUS_SECURITY_UPDATED;
10779 }
10780
10781 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10782 priv->ieee->sec.flags |= SEC_ENABLED;
10783 priv->ieee->sec.enabled = sec->enabled;
10784 priv->status |= STATUS_SECURITY_UPDATED;
10785 if (sec->enabled)
10786 priv->capability |= CAP_PRIVACY_ON;
10787 else
10788 priv->capability &= ~CAP_PRIVACY_ON;
10789 }
10790
10791 if (sec->flags & SEC_ENCRYPT)
10792 priv->ieee->sec.encrypt = sec->encrypt;
10793
10794 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10795 priv->ieee->sec.level = sec->level;
10796 priv->ieee->sec.flags |= SEC_LEVEL;
10797 priv->status |= STATUS_SECURITY_UPDATED;
10798 }
10799
10800 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10801 ipw_set_hwcrypto_keys(priv);
10802
10803 	/* To match the current functionality of ipw2100 (which works well with
10804 	 * various supplicants), we don't force a disassociate if the
10805 	 * privacy capability changes ... */
10806 #if 0
10807 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10808 (((priv->assoc_request.capability &
10809 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10810 (!(priv->assoc_request.capability &
10811 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10812 IPW_DEBUG_ASSOC("Disassociating due to capability "
10813 "change.\n");
10814 ipw_disassociate(priv);
10815 }
10816 #endif
10817 }
10818
10819 static int init_supported_rates(struct ipw_priv *priv,
10820 struct ipw_supported_rates *rates)
10821 {
10822 /* TODO: Mask out rates based on priv->rates_mask */
10823
10824 memset(rates, 0, sizeof(*rates));
10825 /* configure supported rates */
10826 switch (priv->ieee->freq_band) {
10827 case IEEE80211_52GHZ_BAND:
10828 rates->ieee_mode = IPW_A_MODE;
10829 rates->purpose = IPW_RATE_CAPABILITIES;
10830 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10831 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10832 break;
10833
10834 default: /* Mixed or 2.4Ghz */
10835 rates->ieee_mode = IPW_G_MODE;
10836 rates->purpose = IPW_RATE_CAPABILITIES;
10837 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10838 IEEE80211_CCK_DEFAULT_RATES_MASK);
10839 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10840 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10841 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10842 }
10843 break;
10844 }
10845
10846 return 0;
10847 }
10848
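/* Push the host configuration to a freshly loaded firmware image: TX
 * power, adapter address, system config, supported rates, RTS threshold,
 * QoS parameters and random seed, then send the host-complete command to
 * move the firmware into the RUN state. */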
10849 static int ipw_config(struct ipw_priv *priv)
10850 {
10851 	/* This is only called from ipw_up, which resets/reloads the firmware,
10852 	   so we don't need to first disable the card before we configure
10853 	   it */
10854 if (ipw_set_tx_power(priv))
10855 goto error;
10856
10857 /* initialize adapter address */
10858 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10859 goto error;
10860
10861 /* set basic system config settings */
10862 init_sys_config(&priv->sys_config);
10863
10864 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10865 * Does not support BT priority yet (don't abort or defer our Tx) */
10866 if (bt_coexist) {
10867 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10868
10869 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10870 priv->sys_config.bt_coexistence
10871 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10872 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10873 priv->sys_config.bt_coexistence
10874 |= CFG_BT_COEXISTENCE_OOB;
10875 }
10876
10877 #ifdef CONFIG_IPW2200_PROMISCUOUS
10878 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10879 priv->sys_config.accept_all_data_frames = 1;
10880 priv->sys_config.accept_non_directed_frames = 1;
10881 priv->sys_config.accept_all_mgmt_bcpr = 1;
10882 priv->sys_config.accept_all_mgmt_frames = 1;
10883 }
10884 #endif
10885
10886 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10887 priv->sys_config.answer_broadcast_ssid_probe = 1;
10888 else
10889 priv->sys_config.answer_broadcast_ssid_probe = 0;
10890
10891 if (ipw_send_system_config(priv))
10892 goto error;
10893
10894 init_supported_rates(priv, &priv->rates);
10895 if (ipw_send_supported_rates(priv, &priv->rates))
10896 goto error;
10897
10898 /* Set request-to-send threshold */
10899 if (priv->rts_threshold) {
10900 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10901 goto error;
10902 }
10903 #ifdef CONFIG_IPW2200_QOS
10904 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10905 ipw_qos_activate(priv, NULL);
10906 #endif /* CONFIG_IPW2200_QOS */
10907
10908 if (ipw_set_random_seed(priv))
10909 goto error;
10910
10911 /* final state transition to the RUN state */
10912 if (ipw_send_host_complete(priv))
10913 goto error;
10914
10915 priv->status |= STATUS_INIT;
10916
10917 ipw_led_init(priv);
10918 ipw_led_radio_on(priv);
10919 priv->notif_missed_beacons = 0;
10920
10921 /* Set hardware WEP key if it is configured. */
10922 if ((priv->capability & CAP_PRIVACY_ON) &&
10923 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10924 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10925 ipw_set_hwcrypto_keys(priv);
10926
10927 return 0;
10928
10929 error:
10930 return -EIO;
10931 }
10932
10933 /*
10934 * NOTE:
10935 *
10936 * These tables have been tested in conjunction with the
10937 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10938 *
10939 	 * Altering these values, using them on other hardware, or using them in
10940 	 * geographies not intended for resale of the above-mentioned Intel
10941 	 * adapters has not been tested.
10942 *
10943 * Remember to update the table in README.ipw2200 when changing this
10944 * table.
10945 *
10946 */
10947 static const struct ieee80211_geo ipw_geos[] = {
10948 { /* Restricted */
10949 "---",
10950 .bg_channels = 11,
10951 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10952 {2427, 4}, {2432, 5}, {2437, 6},
10953 {2442, 7}, {2447, 8}, {2452, 9},
10954 {2457, 10}, {2462, 11}},
10955 },
10956
10957 { /* Custom US/Canada */
10958 "ZZF",
10959 .bg_channels = 11,
10960 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10961 {2427, 4}, {2432, 5}, {2437, 6},
10962 {2442, 7}, {2447, 8}, {2452, 9},
10963 {2457, 10}, {2462, 11}},
10964 .a_channels = 8,
10965 .a = {{5180, 36},
10966 {5200, 40},
10967 {5220, 44},
10968 {5240, 48},
10969 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10970 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10971 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10972 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10973 },
10974
10975 { /* Rest of World */
10976 "ZZD",
10977 .bg_channels = 13,
10978 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10979 {2427, 4}, {2432, 5}, {2437, 6},
10980 {2442, 7}, {2447, 8}, {2452, 9},
10981 {2457, 10}, {2462, 11}, {2467, 12},
10982 {2472, 13}},
10983 },
10984
10985 { /* Custom USA & Europe & High */
10986 "ZZA",
10987 .bg_channels = 11,
10988 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10989 {2427, 4}, {2432, 5}, {2437, 6},
10990 {2442, 7}, {2447, 8}, {2452, 9},
10991 {2457, 10}, {2462, 11}},
10992 .a_channels = 13,
10993 .a = {{5180, 36},
10994 {5200, 40},
10995 {5220, 44},
10996 {5240, 48},
10997 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10998 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10999 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11000 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11001 {5745, 149},
11002 {5765, 153},
11003 {5785, 157},
11004 {5805, 161},
11005 {5825, 165}},
11006 },
11007
11008 { /* Custom NA & Europe */
11009 "ZZB",
11010 .bg_channels = 11,
11011 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11012 {2427, 4}, {2432, 5}, {2437, 6},
11013 {2442, 7}, {2447, 8}, {2452, 9},
11014 {2457, 10}, {2462, 11}},
11015 .a_channels = 13,
11016 .a = {{5180, 36},
11017 {5200, 40},
11018 {5220, 44},
11019 {5240, 48},
11020 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11021 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11022 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11023 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11024 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11025 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11026 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11027 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11028 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11029 },
11030
11031 { /* Custom Japan */
11032 "ZZC",
11033 .bg_channels = 11,
11034 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11035 {2427, 4}, {2432, 5}, {2437, 6},
11036 {2442, 7}, {2447, 8}, {2452, 9},
11037 {2457, 10}, {2462, 11}},
11038 .a_channels = 4,
11039 .a = {{5170, 34}, {5190, 38},
11040 {5210, 42}, {5230, 46}},
11041 },
11042
11043 { /* Custom */
11044 "ZZM",
11045 .bg_channels = 11,
11046 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11047 {2427, 4}, {2432, 5}, {2437, 6},
11048 {2442, 7}, {2447, 8}, {2452, 9},
11049 {2457, 10}, {2462, 11}},
11050 },
11051
11052 { /* Europe */
11053 "ZZE",
11054 .bg_channels = 13,
11055 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11056 {2427, 4}, {2432, 5}, {2437, 6},
11057 {2442, 7}, {2447, 8}, {2452, 9},
11058 {2457, 10}, {2462, 11}, {2467, 12},
11059 {2472, 13}},
11060 .a_channels = 19,
11061 .a = {{5180, 36},
11062 {5200, 40},
11063 {5220, 44},
11064 {5240, 48},
11065 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11066 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11067 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11068 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11069 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11070 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11071 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11072 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11073 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11074 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11075 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11076 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11077 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11078 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11079 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11080 },
11081
11082 { /* Custom Japan */
11083 "ZZJ",
11084 .bg_channels = 14,
11085 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11086 {2427, 4}, {2432, 5}, {2437, 6},
11087 {2442, 7}, {2447, 8}, {2452, 9},
11088 {2457, 10}, {2462, 11}, {2467, 12},
11089 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11090 .a_channels = 4,
11091 .a = {{5170, 34}, {5190, 38},
11092 {5210, 42}, {5230, 46}},
11093 },
11094
11095 { /* Rest of World */
11096 "ZZR",
11097 .bg_channels = 14,
11098 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11099 {2427, 4}, {2432, 5}, {2437, 6},
11100 {2442, 7}, {2447, 8}, {2452, 9},
11101 {2457, 10}, {2462, 11}, {2467, 12},
11102 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11103 IEEE80211_CH_PASSIVE_ONLY}},
11104 },
11105
11106 { /* High Band */
11107 "ZZH",
11108 .bg_channels = 13,
11109 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11110 {2427, 4}, {2432, 5}, {2437, 6},
11111 {2442, 7}, {2447, 8}, {2452, 9},
11112 {2457, 10}, {2462, 11},
11113 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11114 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11115 .a_channels = 4,
11116 .a = {{5745, 149}, {5765, 153},
11117 {5785, 157}, {5805, 161}},
11118 },
11119
11120 { /* Custom Europe */
11121 "ZZG",
11122 .bg_channels = 13,
11123 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11124 {2427, 4}, {2432, 5}, {2437, 6},
11125 {2442, 7}, {2447, 8}, {2452, 9},
11126 {2457, 10}, {2462, 11},
11127 {2467, 12}, {2472, 13}},
11128 .a_channels = 4,
11129 .a = {{5180, 36}, {5200, 40},
11130 {5220, 44}, {5240, 48}},
11131 },
11132
11133 { /* Europe */
11134 "ZZK",
11135 .bg_channels = 13,
11136 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11137 {2427, 4}, {2432, 5}, {2437, 6},
11138 {2442, 7}, {2447, 8}, {2452, 9},
11139 {2457, 10}, {2462, 11},
11140 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11141 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11142 .a_channels = 24,
11143 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11144 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11145 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11146 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11147 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11148 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11149 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11150 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11151 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11152 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11153 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11154 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11155 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11156 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11157 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11158 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11159 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11160 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11161 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11162 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11163 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11164 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11165 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11166 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11167 },
11168
11169 { /* Europe */
11170 "ZZL",
11171 .bg_channels = 11,
11172 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11173 {2427, 4}, {2432, 5}, {2437, 6},
11174 {2442, 7}, {2447, 8}, {2452, 9},
11175 {2457, 10}, {2462, 11}},
11176 .a_channels = 13,
11177 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11178 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11179 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11180 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11181 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11182 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11183 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11184 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11185 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11186 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11187 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11188 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11189 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11190 }
11191 };
11192
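/*
 * ipw_up() brings the adapter up: it loads the microcode/firmware,
 * reads the MAC address and country code (SKU) from the EEPROM,
 * selects the matching entry from ipw_geos[] above, and then
 * configures the device, retrying the whole sequence up to
 * MAX_HW_RESTARTS times.
 */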
11193 #define MAX_HW_RESTARTS 5
11194 static int ipw_up(struct ipw_priv *priv)
11195 {
11196 int rc, i, j;
11197
11198 if (priv->status & STATUS_EXIT_PENDING)
11199 return -EIO;
11200
11201 if (cmdlog && !priv->cmdlog) {
11202 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11203 GFP_KERNEL);
11204 if (priv->cmdlog == NULL) {
11205 IPW_ERROR("Error allocating %d command log entries.\n",
11206 cmdlog);
11207 return -ENOMEM;
11208 } else {
11209 priv->cmdlog_len = cmdlog;
11210 }
11211 }
11212
11213 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11214 /* Load the microcode, firmware, and eeprom.
11215 * Also start the clocks. */
11216 rc = ipw_load(priv);
11217 if (rc) {
11218 IPW_ERROR("Unable to load firmware: %d\n", rc);
11219 return rc;
11220 }
11221
11222 ipw_init_ordinals(priv);
11223 if (!(priv->config & CFG_CUSTOM_MAC))
11224 eeprom_parse_mac(priv, priv->mac_addr);
11225 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11226
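/* Match the 3-character SKU from the EEPROM against the
 * ipw_geos[] table; fall back to the first (default) geography
 * if the code is not recognized. */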
11227 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11228 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11229 ipw_geos[j].name, 3))
11230 break;
11231 }
11232 if (j == ARRAY_SIZE(ipw_geos)) {
11233 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11234 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11235 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11236 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11237 j = 0;
11238 }
11239 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11240 IPW_WARNING("Could not set geography.");
11241 return 0;
11242 }
11243
11244 if (priv->status & STATUS_RF_KILL_SW) {
11245 IPW_WARNING("Radio disabled by module parameter.\n");
11246 return 0;
11247 } else if (rf_kill_active(priv)) {
11248 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11249 "Kill switch must be turned off for "
11250 "wireless networking to work.\n");
11251 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11252 2 * HZ);
11253 return 0;
11254 }
11255
11256 rc = ipw_config(priv);
11257 if (!rc) {
11258 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11259
11260 /* If configured to try to auto-associate, kick
11261 * off a scan. */
11262 queue_delayed_work(priv->workqueue,
11263 &priv->request_scan, 0);
11264
11265 return 0;
11266 }
11267
11268 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11269 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11270 i, MAX_HW_RESTARTS);
11271
11272 /* We had an error bringing up the hardware, so take it
11273 * all the way back down so we can try again */
11274 ipw_down(priv);
11275 }
11276
11277 /* We tried to restart and configure the device for as long as our
11278 * patience could withstand */
11279 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11280
11281 return -EIO;
11282 }
11283
11284 static void ipw_bg_up(struct work_struct *work)
11285 {
11286 struct ipw_priv *priv =
11287 container_of(work, struct ipw_priv, up);
11288 mutex_lock(&priv->mutex);
11289 ipw_up(priv);
11290 mutex_unlock(&priv->mutex);
11291 }
11292
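/*
 * ipw_deinit(): abort any scan, disassociate, shut down the LEDs,
 * give the firmware a short window to finish the 802.11 exchange and
 * then disable the card.
 */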
11293 static void ipw_deinit(struct ipw_priv *priv)
11294 {
11295 int i;
11296
11297 if (priv->status & STATUS_SCANNING) {
11298 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11299 ipw_abort_scan(priv);
11300 }
11301
11302 if (priv->status & STATUS_ASSOCIATED) {
11303 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11304 ipw_disassociate(priv);
11305 }
11306
11307 ipw_led_shutdown(priv);
11308
11309 /* Wait for the status to clear the scanning and associated
11310 * bits (disassociation can take a while for a full 802.11
11311 * exchange) */
11312 for (i = 1000; i && (priv->status &
11313 (STATUS_DISASSOCIATING |
11314 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11315 udelay(10);
11316
11317 if (priv->status & (STATUS_DISASSOCIATING |
11318 STATUS_ASSOCIATED | STATUS_SCANNING))
11319 IPW_DEBUG_INFO("Still associated or scanning...\n");
11320 else
11321 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11322
11323 /* Attempt to disable the card */
11324 ipw_send_card_disable(priv, 0);
11325
11326 priv->status &= ~STATUS_INIT;
11327 }
11328
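/*
 * ipw_down(): deinitialize the hardware if it is up, mask interrupts,
 * stop the network queue, halt the NIC and turn the radio LED off.
 * All status bits except the RF-kill and EXIT_PENDING bits are
 * cleared.
 */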
11329 static void ipw_down(struct ipw_priv *priv)
11330 {
11331 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11332
11333 priv->status |= STATUS_EXIT_PENDING;
11334
11335 if (ipw_is_init(priv))
11336 ipw_deinit(priv);
11337
11338 /* Wipe out the EXIT_PENDING status bit if we are not actually
11339 * exiting the module */
11340 if (!exit_pending)
11341 priv->status &= ~STATUS_EXIT_PENDING;
11342
11343 /* tell the device to stop sending interrupts */
11344 ipw_disable_interrupts(priv);
11345
11346 /* Clear all bits but the RF Kill and EXIT_PENDING bits */
11347 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11348 netif_carrier_off(priv->net_dev);
11349 netif_stop_queue(priv->net_dev);
11350
11351 ipw_stop_nic(priv);
11352
11353 ipw_led_radio_off(priv);
11354 }
11355
11356 static void ipw_bg_down(struct work_struct *work)
11357 {
11358 struct ipw_priv *priv =
11359 container_of(work, struct ipw_priv, down);
11360 mutex_lock(&priv->mutex);
11361 ipw_down(priv);
11362 mutex_unlock(&priv->mutex);
11363 }
11364
11365 /* Called by register_netdev() */
11366 static int ipw_net_init(struct net_device *dev)
11367 {
11368 struct ipw_priv *priv = ieee80211_priv(dev);
11369 mutex_lock(&priv->mutex);
11370
11371 if (ipw_up(priv)) {
11372 mutex_unlock(&priv->mutex);
11373 return -EIO;
11374 }
11375
11376 mutex_unlock(&priv->mutex);
11377 return 0;
11378 }
11379
11380 /* PCI driver glue: device table, probe/remove and power-management hooks */
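/* Each entry is {vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data}.  Entries with explicit subvendor and
 * subdevice values match only those subsystems; the PCI_ANY_ID
 * entries match any subsystem for that device ID. */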
11381 static struct pci_device_id card_ids[] = {
11382 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11383 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11384 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11385 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11386 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11387 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11388 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11389 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11390 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11391 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11392 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11393 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11394 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11395 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11396 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11397 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11398 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11399 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11400 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11401 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11402 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11403 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11404
11405 /* required last entry */
11406 {0,}
11407 };
11408
11409 MODULE_DEVICE_TABLE(pci, card_ids);
11410
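/* sysfs attributes exported in the PCI device's directory; these are
 * debug and diagnostic knobs (rf_kill, event_log, cmd_log, ...) and
 * are registered as one group in ipw_pci_probe(). */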
11411 static struct attribute *ipw_sysfs_entries[] = {
11412 &dev_attr_rf_kill.attr,
11413 &dev_attr_direct_dword.attr,
11414 &dev_attr_indirect_byte.attr,
11415 &dev_attr_indirect_dword.attr,
11416 &dev_attr_mem_gpio_reg.attr,
11417 &dev_attr_command_event_reg.attr,
11418 &dev_attr_nic_type.attr,
11419 &dev_attr_status.attr,
11420 &dev_attr_cfg.attr,
11421 &dev_attr_error.attr,
11422 &dev_attr_event_log.attr,
11423 &dev_attr_cmd_log.attr,
11424 &dev_attr_eeprom_delay.attr,
11425 &dev_attr_ucode_version.attr,
11426 &dev_attr_rtc.attr,
11427 &dev_attr_scan_age.attr,
11428 &dev_attr_led.attr,
11429 &dev_attr_speed_scan.attr,
11430 &dev_attr_net_stats.attr,
11431 &dev_attr_channels.attr,
11432 #ifdef CONFIG_IPW2200_PROMISCUOUS
11433 &dev_attr_rtap_iface.attr,
11434 &dev_attr_rtap_filter.attr,
11435 #endif
11436 NULL
11437 };
11438
11439 static struct attribute_group ipw_attribute_group = {
11440 .name = NULL, /* put in device directory */
11441 .attrs = ipw_sysfs_entries,
11442 };
11443
11444 #ifdef CONFIG_IPW2200_PROMISCUOUS
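/*
 * Promiscuous (rtap) support: a second, receive-only net_device that
 * hands up frames with radiotap headers.  Opening it while the main
 * interface is not in monitor mode asks the firmware to accept all
 * data and management frames; closing it reverts those settings.
 */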
11445 static int ipw_prom_open(struct net_device *dev)
11446 {
11447 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11448 struct ipw_priv *priv = prom_priv->priv;
11449
11450 IPW_DEBUG_INFO("prom dev->open\n");
11451 netif_carrier_off(dev);
11452 netif_stop_queue(dev);
11453
11454 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11455 priv->sys_config.accept_all_data_frames = 1;
11456 priv->sys_config.accept_non_directed_frames = 1;
11457 priv->sys_config.accept_all_mgmt_bcpr = 1;
11458 priv->sys_config.accept_all_mgmt_frames = 1;
11459
11460 ipw_send_system_config(priv);
11461 }
11462
11463 return 0;
11464 }
11465
11466 static int ipw_prom_stop(struct net_device *dev)
11467 {
11468 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11469 struct ipw_priv *priv = prom_priv->priv;
11470
11471 IPW_DEBUG_INFO("prom dev->stop\n");
11472
11473 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11474 priv->sys_config.accept_all_data_frames = 0;
11475 priv->sys_config.accept_non_directed_frames = 0;
11476 priv->sys_config.accept_all_mgmt_bcpr = 0;
11477 priv->sys_config.accept_all_mgmt_frames = 0;
11478
11479 ipw_send_system_config(priv);
11480 }
11481
11482 return 0;
11483 }
11484
11485 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11486 {
11487 IPW_DEBUG_INFO("prom dev->xmit\n");
11488 netif_stop_queue(dev);
11489 return -EOPNOTSUPP;
11490 }
11491
11492 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11493 {
11494 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11495 return &prom_priv->ieee->stats;
11496 }
11497
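/* Allocate and register the "rtap%d" radiotap interface.  It gets its
 * own ieee80211 device via alloc_ieee80211() and is torn down again in
 * ipw_prom_free(). */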
11498 static int ipw_prom_alloc(struct ipw_priv *priv)
11499 {
11500 int rc = 0;
11501
11502 if (priv->prom_net_dev)
11503 return -EPERM;
11504
11505 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11506 if (priv->prom_net_dev == NULL)
11507 return -ENOMEM;
11508
11509 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11510 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11511 priv->prom_priv->priv = priv;
11512
11513 strcpy(priv->prom_net_dev->name, "rtap%d");
11514
11515 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11516 priv->prom_net_dev->open = ipw_prom_open;
11517 priv->prom_net_dev->stop = ipw_prom_stop;
11518 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11519 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11520
11521 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11522
11523 rc = register_netdev(priv->prom_net_dev);
11524 if (rc) {
11525 free_ieee80211(priv->prom_net_dev);
11526 priv->prom_net_dev = NULL;
11527 return rc;
11528 }
11529
11530 return 0;
11531 }
11532
11533 static void ipw_prom_free(struct ipw_priv *priv)
11534 {
11535 if (!priv->prom_net_dev)
11536 return;
11537
11538 unregister_netdev(priv->prom_net_dev);
11539 free_ieee80211(priv->prom_net_dev);
11540
11541 priv->prom_net_dev = NULL;
11542 }
11543
11544 #endif
11545
11546
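/*
 * ipw_pci_probe(): enable the PCI device, restrict DMA to 32 bits,
 * map BAR 0, set up the deferred work items, hook up the shared IRQ,
 * fill in the net_device/ieee80211 callbacks, create the sysfs group
 * and finally register the network device (plus the optional rtap
 * interface).  Each step unwinds through the out_* labels on failure.
 */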
11547 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11548 {
11549 int err = 0;
11550 struct net_device *net_dev;
11551 void __iomem *base;
11552 u32 length, val;
11553 struct ipw_priv *priv;
11554 int i;
11555
11556 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11557 if (net_dev == NULL) {
11558 err = -ENOMEM;
11559 goto out;
11560 }
11561
11562 priv = ieee80211_priv(net_dev);
11563 priv->ieee = netdev_priv(net_dev);
11564
11565 priv->net_dev = net_dev;
11566 priv->pci_dev = pdev;
11567 ipw_debug_level = debug;
11568 spin_lock_init(&priv->irq_lock);
11569 spin_lock_init(&priv->lock);
11570 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11571 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11572
11573 mutex_init(&priv->mutex);
11574 if (pci_enable_device(pdev)) {
11575 err = -ENODEV;
11576 goto out_free_ieee80211;
11577 }
11578
11579 pci_set_master(pdev);
11580
11581 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11582 if (!err)
11583 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11584 if (err) {
11585 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11586 goto out_pci_disable_device;
11587 }
11588
11589 pci_set_drvdata(pdev, priv);
11590
11591 err = pci_request_regions(pdev, DRV_NAME);
11592 if (err)
11593 goto out_pci_disable_device;
11594
11595 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11596 * PCI Tx retries from interfering with C3 CPU state */
11597 pci_read_config_dword(pdev, 0x40, &val);
11598 if ((val & 0x0000ff00) != 0)
11599 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11600
11601 length = pci_resource_len(pdev, 0);
11602 priv->hw_len = length;
11603
11604 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11605 if (!base) {
11606 err = -ENODEV;
11607 goto out_pci_release_regions;
11608 }
11609
11610 priv->hw_base = base;
11611 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11612 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11613
11614 err = ipw_setup_deferred_work(priv);
11615 if (err) {
11616 IPW_ERROR("Unable to setup deferred work\n");
11617 goto out_iounmap;
11618 }
11619
11620 ipw_sw_reset(priv, 1);
11621
11622 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11623 if (err) {
11624 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11625 goto out_destroy_workqueue;
11626 }
11627
11628 SET_MODULE_OWNER(net_dev);
11629 SET_NETDEV_DEV(net_dev, &pdev->dev);
11630
11631 mutex_lock(&priv->mutex);
11632
11633 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11634 priv->ieee->set_security = shim__set_security;
11635 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11636
11637 #ifdef CONFIG_IPW2200_QOS
11638 priv->ieee->is_qos_active = ipw_is_qos_active;
11639 priv->ieee->handle_probe_response = ipw_handle_beacon;
11640 priv->ieee->handle_beacon = ipw_handle_probe_response;
11641 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11642 #endif /* CONFIG_IPW2200_QOS */
11643
11644 priv->ieee->perfect_rssi = -20;
11645 priv->ieee->worst_rssi = -85;
11646
11647 net_dev->open = ipw_net_open;
11648 net_dev->stop = ipw_net_stop;
11649 net_dev->init = ipw_net_init;
11650 net_dev->get_stats = ipw_net_get_stats;
11651 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11652 net_dev->set_mac_address = ipw_net_set_mac_address;
11653 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11654 net_dev->wireless_data = &priv->wireless_data;
11655 net_dev->wireless_handlers = &ipw_wx_handler_def;
11656 net_dev->ethtool_ops = &ipw_ethtool_ops;
11657 net_dev->irq = pdev->irq;
11658 net_dev->base_addr = (unsigned long)priv->hw_base;
11659 net_dev->mem_start = pci_resource_start(pdev, 0);
11660 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11661
11662 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11663 if (err) {
11664 IPW_ERROR("failed to create sysfs device attributes\n");
11665 mutex_unlock(&priv->mutex);
11666 goto out_release_irq;
11667 }
11668
11669 mutex_unlock(&priv->mutex);
11670 err = register_netdev(net_dev);
11671 if (err) {
11672 IPW_ERROR("failed to register network device\n");
11673 goto out_remove_sysfs;
11674 }
11675
11676 #ifdef CONFIG_IPW2200_PROMISCUOUS
11677 if (rtap_iface) {
11678 err = ipw_prom_alloc(priv);
11679 if (err) {
11680 IPW_ERROR("Failed to register promiscuous network "
11681 "device (error %d).\n", err);
11682 unregister_netdev(priv->net_dev);
11683 goto out_remove_sysfs;
11684 }
11685 }
11686 #endif
11687
11688 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11689 "channels, %d 802.11a channels)\n",
11690 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11691 priv->ieee->geo.a_channels);
11692
11693 return 0;
11694
11695 out_remove_sysfs:
11696 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11697 out_release_irq:
11698 free_irq(pdev->irq, priv);
11699 out_destroy_workqueue:
11700 destroy_workqueue(priv->workqueue);
11701 priv->workqueue = NULL;
11702 out_iounmap:
11703 iounmap(priv->hw_base);
11704 out_pci_release_regions:
11705 pci_release_regions(pdev);
11706 out_pci_disable_device:
11707 pci_disable_device(pdev);
11708 pci_set_drvdata(pdev, NULL);
11709 out_free_ieee80211:
11710 free_ieee80211(priv->net_dev);
11711 out:
11712 return err;
11713 }
11714
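/*
 * ipw_pci_remove(): take the device down, then release everything in
 * roughly the reverse order of ipw_pci_probe(): sysfs group, netdev,
 * rx/tx queues, command log, delayed work and workqueue, IBSS MAC
 * hash, IRQ, MMIO mapping, PCI regions and the cached firmware.
 */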
11715 static void ipw_pci_remove(struct pci_dev *pdev)
11716 {
11717 struct ipw_priv *priv = pci_get_drvdata(pdev);
11718 struct list_head *p, *q;
11719 int i;
11720
11721 if (!priv)
11722 return;
11723
11724 mutex_lock(&priv->mutex);
11725
11726 priv->status |= STATUS_EXIT_PENDING;
11727 ipw_down(priv);
11728 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11729
11730 mutex_unlock(&priv->mutex);
11731
11732 unregister_netdev(priv->net_dev);
11733
11734 if (priv->rxq) {
11735 ipw_rx_queue_free(priv, priv->rxq);
11736 priv->rxq = NULL;
11737 }
11738 ipw_tx_queue_free(priv);
11739
11740 if (priv->cmdlog) {
11741 kfree(priv->cmdlog);
11742 priv->cmdlog = NULL;
11743 }
11744 /* ipw_down will ensure that there is no more pending work
11745 * in the workqueue, so we can safely cancel the delayed work now. */
11746 cancel_delayed_work(&priv->adhoc_check);
11747 cancel_delayed_work(&priv->gather_stats);
11748 cancel_delayed_work(&priv->request_scan);
11749 cancel_delayed_work(&priv->rf_kill);
11750 cancel_delayed_work(&priv->scan_check);
11751 destroy_workqueue(priv->workqueue);
11752 priv->workqueue = NULL;
11753
11754 /* Free MAC hash list for ADHOC */
11755 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11756 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11757 list_del(p);
11758 kfree(list_entry(p, struct ipw_ibss_seq, list));
11759 }
11760 }
11761
11762 kfree(priv->error);
11763 priv->error = NULL;
11764
11765 #ifdef CONFIG_IPW2200_PROMISCUOUS
11766 ipw_prom_free(priv);
11767 #endif
11768
11769 free_irq(pdev->irq, priv);
11770 iounmap(priv->hw_base);
11771 pci_release_regions(pdev);
11772 pci_disable_device(pdev);
11773 pci_set_drvdata(pdev, NULL);
11774 free_ieee80211(priv->net_dev);
11775 free_firmware();
11776 }
11777
11778 #ifdef CONFIG_PM
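/*
 * Power management: suspend takes the device all the way down and
 * saves the PCI state; resume re-enables the device, restores the
 * state, re-applies the RETRY_TIMEOUT (0x41) quirk and schedules the
 * 'up' work to bring the adapter back online.
 */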
11779 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11780 {
11781 struct ipw_priv *priv = pci_get_drvdata(pdev);
11782 struct net_device *dev = priv->net_dev;
11783
11784 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11785
11786 /* Take the device down; this powers it off, etc. */
11787 ipw_down(priv);
11788
11789 /* Remove the PRESENT state of the device */
11790 netif_device_detach(dev);
11791
11792 pci_save_state(pdev);
11793 pci_disable_device(pdev);
11794 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11795
11796 return 0;
11797 }
11798
11799 static int ipw_pci_resume(struct pci_dev *pdev)
11800 {
11801 struct ipw_priv *priv = pci_get_drvdata(pdev);
11802 struct net_device *dev = priv->net_dev;
11803 int err;
11804 u32 val;
11805
11806 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11807
11808 pci_set_power_state(pdev, PCI_D0);
11809 err = pci_enable_device(pdev);
11810 if (err) {
11811 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11812 dev->name);
11813 return err;
11814 }
11815 pci_restore_state(pdev);
11816
11817 /*
11818 * Suspend/Resume resets the PCI configuration space, so we have to
11819 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11820 * from interfering with C3 CPU state. pci_restore_state won't help
11821 * here since it only restores the first 64 bytes of the PCI config header.
11822 */
11823 pci_read_config_dword(pdev, 0x40, &val);
11824 if ((val & 0x0000ff00) != 0)
11825 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11826
11827 /* Set the device back into the PRESENT state; this will also wake
11828 * the queue if needed */
11829 netif_device_attach(dev);
11830
11831 /* Bring the device back up */
11832 queue_work(priv->workqueue, &priv->up);
11833
11834 return 0;
11835 }
11836 #endif
11837
11838 static void ipw_pci_shutdown(struct pci_dev *pdev)
11839 {
11840 struct ipw_priv *priv = pci_get_drvdata(pdev);
11841
11842 /* Take the device down; this powers it off, etc. */
11843 ipw_down(priv);
11844
11845 pci_disable_device(pdev);
11846 }
11847
11848 /* driver registration and module init/exit */
11849 static struct pci_driver ipw_driver = {
11850 .name = DRV_NAME,
11851 .id_table = card_ids,
11852 .probe = ipw_pci_probe,
11853 .remove = __devexit_p(ipw_pci_remove),
11854 #ifdef CONFIG_PM
11855 .suspend = ipw_pci_suspend,
11856 .resume = ipw_pci_resume,
11857 #endif
11858 .shutdown = ipw_pci_shutdown,
11859 };
11860
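/* Module entry point: register the PCI driver and expose the
 * debug_level attribute under the driver's sysfs directory; undo the
 * PCI registration if the sysfs file cannot be created. */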
11861 static int __init ipw_init(void)
11862 {
11863 int ret;
11864
11865 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11866 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11867
11868 ret = pci_register_driver(&ipw_driver);
11869 if (ret) {
11870 IPW_ERROR("Unable to initialize PCI module\n");
11871 return ret;
11872 }
11873
11874 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11875 if (ret) {
11876 IPW_ERROR("Unable to create driver sysfs file\n");
11877 pci_unregister_driver(&ipw_driver);
11878 return ret;
11879 }
11880
11881 return ret;
11882 }
11883
11884 static void __exit ipw_exit(void)
11885 {
11886 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11887 pci_unregister_driver(&ipw_driver);
11888 }
11889
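/* Module parameters.  All use permissions 0444, i.e. they are
 * read-only via sysfs and can only be set on the module command
 * line at load time. */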
11890 module_param(disable, int, 0444);
11891 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11892
11893 module_param(associate, int, 0444);
11894 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11895
11896 module_param(auto_create, int, 0444);
11897 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11898
11899 module_param(led, int, 0444);
11900 MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11901
11902 module_param(debug, int, 0444);
11903 MODULE_PARM_DESC(debug, "debug output mask");
11904
11905 module_param(channel, int, 0444);
11906 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11907
11908 #ifdef CONFIG_IPW2200_PROMISCUOUS
11909 module_param(rtap_iface, int, 0444);
11910 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11911 #endif
11912
11913 #ifdef CONFIG_IPW2200_QOS
11914 module_param(qos_enable, int, 0444);
11915 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11916
11917 module_param(qos_burst_enable, int, 0444);
11918 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11919
11920 module_param(qos_no_ack_mask, int, 0444);
11921 MODULE_PARM_DESC(qos_no_ack_mask, "bitmask of Tx queues for which no ACK is requested");
11922
11923 module_param(burst_duration_CCK, int, 0444);
11924 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11925
11926 module_param(burst_duration_OFDM, int, 0444);
11927 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11928 #endif /* CONFIG_IPW2200_QOS */
11929
11930 #ifdef CONFIG_IPW2200_MONITOR
11931 module_param(mode, int, 0444);
11932 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11933 #else
11934 module_param(mode, int, 0444);
11935 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11936 #endif
11937
11938 module_param(bt_coexist, int, 0444);
11939 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11940
11941 module_param(hwcrypto, int, 0444);
11942 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11943
11944 module_param(cmdlog, int, 0444);
11945 MODULE_PARM_DESC(cmdlog,
11946 "allocate a ring buffer for logging firmware commands");
11947
11948 module_param(roaming, int, 0444);
11949 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11950
11951 module_param(antenna, int, 0444);
11952 MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
11953
11954 module_exit(ipw_exit);
11955 module_init(ipw_init);