1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38
39
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION IPW2200_VERSION
80
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
113 #endif
114
115 static struct ieee80211_rate ipw2200_rates[] = {
116 { .bitrate = 10 },
117 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 { .bitrate = 60 },
121 { .bitrate = 90 },
122 { .bitrate = 120 },
123 { .bitrate = 180 },
124 { .bitrate = 240 },
125 { .bitrate = 360 },
126 { .bitrate = 480 },
127 { .bitrate = 540 }
128 };
129
130 #define ipw2200_a_rates (ipw2200_rates + 4)
131 #define ipw2200_num_a_rates 8
132 #define ipw2200_bg_rates (ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates 12
134
135 /* Ugly macro to convert literal channel numbers into their MHz equivalents
136  * There are certainly some conditions that will break this (like feeding it '30')
137 * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 (((x) <= 14) ? \
140 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 ((x) + 1000) * 5)
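/*
 * Illustrative values only (editorial, derived from the macro above):
 * channel 1 maps to (1 * 5) + 2407 = 2412 MHz, channel 13 to 2472 MHz,
 * channel 14 to the special-cased 2484 MHz, and an 802.11a channel such
 * as 36 to (36 + 1000) * 5 = 5180 MHz.
 */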
142
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 QOS_TX3_CW_MIN_OFDM},
153 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 QOS_TX3_CW_MAX_OFDM},
155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 QOS_TX3_CW_MIN_CCK},
164 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 QOS_TX3_CW_MAX_CCK},
166 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 DEF_TX3_CW_MIN_OFDM},
175 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 DEF_TX3_CW_MAX_OFDM},
177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 DEF_TX3_CW_MIN_CCK},
186 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 DEF_TX3_CW_MAX_CCK},
188 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195
196 static int from_priority_to_tx_queue[] = {
197 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
200
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 *qos_param);
207 #endif /* CONFIG_IPW2200_QOS */
208
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 int len, int sync);
218
219 static void ipw_tx_queue_free(struct ipw_priv *);
220
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233
234 static int snprint_line(char *buf, size_t count,
235 const u8 * data, u32 len, u32 ofs)
236 {
237 int out, i, j, l;
238 char c;
239
240 out = snprintf(buf, count, "%08X", ofs);
241
242 for (l = 0, i = 0; i < 2; i++) {
243 out += snprintf(buf + out, count - out, " ");
244 for (j = 0; j < 8 && l < len; j++, l++)
245 out += snprintf(buf + out, count - out, "%02X ",
246 data[(i * 8 + j)]);
247 for (; j < 8; j++)
248 out += snprintf(buf + out, count - out, " ");
249 }
250
251 out += snprintf(buf + out, count - out, " ");
252 for (l = 0, i = 0; i < 2; i++) {
253 out += snprintf(buf + out, count - out, " ");
254 for (j = 0; j < 8 && l < len; j++, l++) {
255 c = data[(i * 8 + j)];
256 if (!isascii(c) || !isprint(c))
257 c = '.';
258
259 out += snprintf(buf + out, count - out, "%c", c);
260 }
261
262 for (; j < 8; j++)
263 out += snprintf(buf + out, count - out, " ");
264 }
265
266 return out;
267 }
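/*
 * For reference (editorial, spacing approximate): a full 16-byte chunk at
 * offset 0 is rendered by the routine above roughly as
 *   00000000 41 42 43 44 45 46 47 48  49 4A 4B 4C 4D 4E 4F 50   ABCDEFGH IJKLMNOP
 * i.e. an 8-digit hex offset, two groups of eight hex bytes, then the
 * printable-ASCII view with '.' substituted for non-printable bytes.
 */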
268
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 char line[81];
272 u32 ofs = 0;
273 if (!(ipw_debug_level & level))
274 return;
275
276 while (len) {
277 snprint_line(line, sizeof(line), &data[ofs],
278 min(len, 16U), ofs);
279 printk(KERN_DEBUG "%s\n", line);
280 ofs += 16;
281 len -= min(len, 16U);
282 }
283 }
284
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 size_t out = size;
288 u32 ofs = 0;
289 int total = 0;
290
291 while (size && len) {
292 out = snprint_line(output, size, &data[ofs],
293 min_t(size_t, len, 16U), ofs);
294
295 ofs += 16;
296 output += out;
297 size -= out;
298 len -= min_t(size_t, len, 16U);
299 total += out;
300 }
301 return total;
302 }
303
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 __LINE__, (u32) (b), (u32) (c));
318 _ipw_write_reg8(a, b, c);
319 }
320
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 __LINE__, (u32) (b), (u32) (c));
327 _ipw_write_reg16(a, b, c);
328 }
329
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 __LINE__, (u32) (b), (u32) (c));
336 _ipw_write_reg32(a, b, c);
337 }
338
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 u8 val)
342 {
343 writeb(val, ipw->hw_base + ofs);
344 }
345
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 __LINE__, (u32)(ofs), (u32)(val)); \
350 _ipw_write8(ipw, ofs, val); \
351 } while (0)
352
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 u16 val)
356 {
357 writew(val, ipw->hw_base + ofs);
358 }
359
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 __LINE__, (u32)(ofs), (u32)(val)); \
364 _ipw_write16(ipw, ofs, val); \
365 } while (0)
366
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 u32 val)
370 {
371 writel(val, ipw->hw_base + ofs);
372 }
373
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 __LINE__, (u32)(ofs), (u32)(val)); \
378 _ipw_write32(ipw, ofs, val); \
379 } while (0)
380
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 return readb(ipw->hw_base + ofs);
385 }
386
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 (u32)(ofs)); \
391 _ipw_read8(ipw, ofs); \
392 })
393
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 return readw(ipw->hw_base + ofs);
398 }
399
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 (u32)(ofs)); \
404 _ipw_read16(ipw, ofs); \
405 })
406
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 return readl(ipw->hw_base + ofs);
411 }
412
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 (u32)(ofs)); \
417 _ipw_read32(ipw, ofs); \
418 })
419
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 __LINE__, (u32)(b), (u32)(d)); \
425 _ipw_read_indirect(a, b, c, d); \
426 })
427
428 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 __LINE__, (u32)(b), (u32)(d)); \
434 _ipw_write_indirect(a, b, c, d); \
435 } while (0)
436
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
449 u32 dif_len = reg - aligned_addr;
450
451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
460 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461
462 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 u32 word;
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
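/*
 * Worked example (editorial, assuming IPW_INDIRECT_ADDR_MASK dword-aligns
 * the address as the sibling helpers' comments state): reading reg
 * 0x00030006 latches aligned address 0x00030004, then selects byte lane
 * (0x6 & 0x3) = 2, i.e. bits 16..23 of the dword read back through
 * IPW_INDIRECT_DATA.
 */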
476
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 u32 value;
481
482 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483
484 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 return value;
488 }
489
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /* for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 int num)
494 {
495 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
496 u32 dif_len = addr - aligned_addr;
497 u32 i;
498
499 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500
501 if (num <= 0) {
502 return;
503 }
504
505 /* Read the first dword (or portion) byte by byte */
506 if (unlikely(dif_len)) {
507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 /* Start reading at aligned_addr + dif_len */
509 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 aligned_addr += 4;
512 }
513
514 /* Read all of the middle dwords as dwords, with auto-increment */
515 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518
519 /* Read the last dword (or portion) byte by byte */
520 if (unlikely(num)) {
521 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 for (i = 0; num > 0; i++, num--)
523 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 }
525 }
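/*
 * Editorial note with an illustrative (made-up) request: a 10-byte read
 * starting at an address ending in ...6 is split by the routine above into
 * 2 leading bytes (lanes 2-3 of the first dword), then 2 full dwords via
 * the auto-increment port, and no trailing bytes; an aligned 16-byte read
 * would be 4 auto-increment dwords with no byte-wise phases at all.
 */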
526
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /* for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 int num)
531 {
532 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
533 u32 dif_len = addr - aligned_addr;
534 u32 i;
535
536 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537
538 if (num <= 0) {
539 return;
540 }
541
542 /* Write the first dword (or portion) byte by byte */
543 if (unlikely(dif_len)) {
544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 /* Start writing at aligned_addr + dif_len */
546 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 aligned_addr += 4;
549 }
550
551 /* Write all of the middle dwords as dwords, with auto-increment */
552 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555
556 /* Write the last dword (or portion) byte by byte */
557 if (unlikely(num)) {
558 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 for (i = 0; num > 0; i++, num--, buf++)
560 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 }
562 }
563
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /* for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 int num)
568 {
569 memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 if (priv->status & STATUS_INT_ENABLED)
587 return;
588 priv->status |= STATUS_INT_ENABLED;
589 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 if (!(priv->status & STATUS_INT_ENABLED))
595 return;
596 priv->status &= ~STATUS_INT_ENABLED;
597 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 unsigned long flags;
603
604 spin_lock_irqsave(&priv->irq_lock, flags);
605 __ipw_enable_interrupts(priv);
606 spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 unsigned long flags;
612
613 spin_lock_irqsave(&priv->irq_lock, flags);
614 __ipw_disable_interrupts(priv);
615 spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617
618 static char *ipw_error_desc(u32 val)
619 {
620 switch (val) {
621 case IPW_FW_ERROR_OK:
622 return "ERROR_OK";
623 case IPW_FW_ERROR_FAIL:
624 return "ERROR_FAIL";
625 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 return "MEMORY_UNDERFLOW";
627 case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 return "MEMORY_OVERFLOW";
629 case IPW_FW_ERROR_BAD_PARAM:
630 return "BAD_PARAM";
631 case IPW_FW_ERROR_BAD_CHECKSUM:
632 return "BAD_CHECKSUM";
633 case IPW_FW_ERROR_NMI_INTERRUPT:
634 return "NMI_INTERRUPT";
635 case IPW_FW_ERROR_BAD_DATABASE:
636 return "BAD_DATABASE";
637 case IPW_FW_ERROR_ALLOC_FAIL:
638 return "ALLOC_FAIL";
639 case IPW_FW_ERROR_DMA_UNDERRUN:
640 return "DMA_UNDERRUN";
641 case IPW_FW_ERROR_DMA_STATUS:
642 return "DMA_STATUS";
643 case IPW_FW_ERROR_DINO_ERROR:
644 return "DINO_ERROR";
645 case IPW_FW_ERROR_EEPROM_ERROR:
646 return "EEPROM_ERROR";
647 case IPW_FW_ERROR_SYSASSERT:
648 return "SYSASSERT";
649 case IPW_FW_ERROR_FATAL_ERROR:
650 return "FATAL_ERROR";
651 default:
652 return "UNKNOWN_ERROR";
653 }
654 }
655
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 struct ipw_fw_error *error)
658 {
659 u32 i;
660
661 if (!error) {
662 IPW_ERROR("Error allocating and capturing error log. "
663 "Nothing to dump.\n");
664 return;
665 }
666
667 IPW_ERROR("Start IPW Error Log Dump:\n");
668 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 error->status, error->config);
670
671 for (i = 0; i < error->elem_len; i++)
672 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
673 ipw_error_desc(error->elem[i].desc),
674 error->elem[i].time,
675 error->elem[i].blink1,
676 error->elem[i].blink2,
677 error->elem[i].link1,
678 error->elem[i].link2, error->elem[i].data);
679 for (i = 0; i < error->log_len; i++)
680 IPW_ERROR("%i\t0x%08x\t%i\n",
681 error->log[i].time,
682 error->log[i].data, error->log[i].event);
683 }
684
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 u32 addr, field_info, field_len, field_count, total_len;
693
694 IPW_DEBUG_ORD("ordinal = %i\n", ord);
695
696 if (!priv || !val || !len) {
697 IPW_DEBUG_ORD("Invalid argument\n");
698 return -EINVAL;
699 }
700
701 /* verify device ordinal tables have been initialized */
702 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 return -EINVAL;
705 }
706
707 switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 case IPW_ORD_TABLE_0_MASK:
709 /*
710 * TABLE 0: Direct access to a table of 32 bit values
711 *
712 * This is a very simple table with the data directly
713 * read from the table
714 */
715
716 /* remove the table id from the ordinal */
717 ord &= IPW_ORD_TABLE_VALUE_MASK;
718
719 /* boundary check */
720 if (ord > priv->table0_len) {
721 IPW_DEBUG_ORD("ordinal value (%i) longer than "
722 "max (%i)\n", ord, priv->table0_len);
723 return -EINVAL;
724 }
725
726 /* verify we have enough room to store the value */
727 if (*len < sizeof(u32)) {
728 IPW_DEBUG_ORD("ordinal buffer length too small, "
729 "need %zd\n", sizeof(u32));
730 return -EINVAL;
731 }
732
733 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 ord, priv->table0_addr + (ord << 2));
735
736 *len = sizeof(u32);
737 ord <<= 2;
738 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 break;
740
741 case IPW_ORD_TABLE_1_MASK:
742 /*
743 * TABLE 1: Indirect access to a table of 32 bit values
744 *
745 * This is a fairly large table of u32 values each
746 * representing starting addr for the data (which is
747 * also a u32)
748 */
749
750 /* remove the table id from the ordinal */
751 ord &= IPW_ORD_TABLE_VALUE_MASK;
752
753 /* boundary check */
754 if (ord > priv->table1_len) {
755 IPW_DEBUG_ORD("ordinal value too long\n");
756 return -EINVAL;
757 }
758
759 /* verify we have enough room to store the value */
760 if (*len < sizeof(u32)) {
761 IPW_DEBUG_ORD("ordinal buffer length too small, "
762 "need %zd\n", sizeof(u32));
763 return -EINVAL;
764 }
765
766 *((u32 *) val) =
767 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 *len = sizeof(u32);
769 break;
770
771 case IPW_ORD_TABLE_2_MASK:
772 /*
773 * TABLE 2: Indirect access to a table of variable sized values
774 *
775 * This table consists of six entries, each containing:
776 * - a dword containing the starting offset of the data
777 * - a dword containing the length in the first 16 bits
778 * and the count in the second 16 bits
779 */
780
781 /* remove the table id from the ordinal */
782 ord &= IPW_ORD_TABLE_VALUE_MASK;
783
784 /* boundary check */
785 if (ord > priv->table2_len) {
786 IPW_DEBUG_ORD("ordinal value too long\n");
787 return -EINVAL;
788 }
789
790 /* get the address of statistic */
791 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792
793 /* get the second DW of statistics ;
794 * two 16-bit words - first is length, second is count */
795 field_info =
796 ipw_read_reg32(priv,
797 priv->table2_addr + (ord << 3) +
798 sizeof(u32));
799
800 /* get each entry length */
801 field_len = *((u16 *) & field_info);
802
803 /* get number of entries */
804 field_count = *(((u16 *) & field_info) + 1);
805
806 /* abort if not enough memory */
807 total_len = field_len * field_count;
808 if (total_len > *len) {
809 *len = total_len;
810 return -EINVAL;
811 }
812
813 *len = total_len;
814 if (!total_len)
815 return 0;
816
817 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 "field_info = 0x%08x\n",
819 addr, total_len, field_info);
820 ipw_read_indirect(priv, addr, val, total_len);
821 break;
822
823 default:
824 IPW_DEBUG_ORD("Invalid ordinal!\n");
825 return -EINVAL;
826
827 }
828
829 return 0;
830 }
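/*
 * Illustrative use (editorial sketch, mirroring the sysfs handlers later in
 * this file): callers pass a buffer and its size, and *len is updated to
 * the number of bytes required/written, e.g.
 *
 *	u32 tmp = 0, len = sizeof(u32);
 *
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		return 0;
 *
 * A return of -EINVAL with *len enlarged means the supplied buffer was too
 * small for a table-2 ordinal and the call should be retried with *len bytes.
 */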
831
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 priv->table0_len = ipw_read32(priv, priv->table0_addr);
836
837 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 priv->table0_addr, priv->table0_len);
839
840 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842
843 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 priv->table1_addr, priv->table1_len);
845
846 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 priv->table2_len &= 0x0000ffff; /* only the low 16 bits hold the length */
849
850 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 priv->table2_addr, priv->table2_len);
852
853 }
854
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 reg &= ~IPW_START_STANDBY;
858 if (reg & IPW_GATE_ODMA)
859 reg &= ~IPW_GATE_ODMA;
860 if (reg & IPW_GATE_IDMA)
861 reg &= ~IPW_GATE_IDMA;
862 if (reg & IPW_GATE_ADMA)
863 reg &= ~IPW_GATE_ADMA;
864 return reg;
865 }
866
867 /*
868 * LED behavior:
869 * - On radio ON, turn on any LEDs that need to be on during startup
870 * - On initialization, start unassociated blink
871 * - On association, disable unassociated blink
872 * - On disassociation, start unassociated blink
873 * - On radio OFF, turn off any LEDs started during radio on
874 *
875 */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 unsigned long flags;
883 u32 led;
884
885 /* If configured to not use LEDs, or nic_type is 1,
886 * then we don't toggle a LINK led */
887 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 return;
889
890 spin_lock_irqsave(&priv->lock, flags);
891
892 if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 !(priv->status & STATUS_LED_LINK_ON)) {
894 IPW_DEBUG_LED("Link LED On\n");
895 led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 led |= priv->led_association_on;
897
898 led = ipw_register_toggle(led);
899
900 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 ipw_write_reg32(priv, IPW_EVENT_REG, led);
902
903 priv->status |= STATUS_LED_LINK_ON;
904
905 /* If we aren't associated, schedule turning the LED off */
906 if (!(priv->status & STATUS_ASSOCIATED))
907 schedule_delayed_work(&priv->led_link_off,
908 LD_TIME_LINK_ON);
909 }
910
911 spin_unlock_irqrestore(&priv->lock, flags);
912 }
913
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 struct ipw_priv *priv =
917 container_of(work, struct ipw_priv, led_link_on.work);
918 mutex_lock(&priv->mutex);
919 ipw_led_link_on(priv);
920 mutex_unlock(&priv->mutex);
921 }
922
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 unsigned long flags;
926 u32 led;
927
928 /* If configured not to use LEDs, or nic type is 1,
929 * then we don't toggle the LINK LED. */
930 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 return;
932
933 spin_lock_irqsave(&priv->lock, flags);
934
935 if (priv->status & STATUS_LED_LINK_ON) {
936 led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 led &= priv->led_association_off;
938 led = ipw_register_toggle(led);
939
940 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 ipw_write_reg32(priv, IPW_EVENT_REG, led);
942
943 IPW_DEBUG_LED("Link LED Off\n");
944
945 priv->status &= ~STATUS_LED_LINK_ON;
946
947 /* If we aren't associated and the radio is on, schedule
948 * turning the LED on (blink while unassociated) */
949 if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 !(priv->status & STATUS_ASSOCIATED))
951 schedule_delayed_work(&priv->led_link_on,
952 LD_TIME_LINK_OFF);
953
954 }
955
956 spin_unlock_irqrestore(&priv->lock, flags);
957 }
958
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 struct ipw_priv *priv =
962 container_of(work, struct ipw_priv, led_link_off.work);
963 mutex_lock(&priv->mutex);
964 ipw_led_link_off(priv);
965 mutex_unlock(&priv->mutex);
966 }
967
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 u32 led;
971
972 if (priv->config & CFG_NO_LED)
973 return;
974
975 if (priv->status & STATUS_RF_KILL_MASK)
976 return;
977
978 if (!(priv->status & STATUS_LED_ACT_ON)) {
979 led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 led |= priv->led_activity_on;
981
982 led = ipw_register_toggle(led);
983
984 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 ipw_write_reg32(priv, IPW_EVENT_REG, led);
986
987 IPW_DEBUG_LED("Activity LED On\n");
988
989 priv->status |= STATUS_LED_ACT_ON;
990
991 cancel_delayed_work(&priv->led_act_off);
992 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 } else {
994 /* Reschedule LED off for full time period */
995 cancel_delayed_work(&priv->led_act_off);
996 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 }
998 }
999
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 unsigned long flags;
1004 spin_lock_irqsave(&priv->lock, flags);
1005 __ipw_led_activity_on(priv);
1006 spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif /* 0 */
1009
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 unsigned long flags;
1013 u32 led;
1014
1015 if (priv->config & CFG_NO_LED)
1016 return;
1017
1018 spin_lock_irqsave(&priv->lock, flags);
1019
1020 if (priv->status & STATUS_LED_ACT_ON) {
1021 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 led &= priv->led_activity_off;
1023
1024 led = ipw_register_toggle(led);
1025
1026 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028
1029 IPW_DEBUG_LED("Activity LED Off\n");
1030
1031 priv->status &= ~STATUS_LED_ACT_ON;
1032 }
1033
1034 spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 struct ipw_priv *priv =
1040 container_of(work, struct ipw_priv, led_act_off.work);
1041 mutex_lock(&priv->mutex);
1042 ipw_led_activity_off(priv);
1043 mutex_unlock(&priv->mutex);
1044 }
1045
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 unsigned long flags;
1049 u32 led;
1050
1051 /* Only nic type 1 supports mode LEDs */
1052 if (priv->config & CFG_NO_LED ||
1053 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 return;
1055
1056 spin_lock_irqsave(&priv->lock, flags);
1057
1058 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 if (priv->assoc_network->mode == IEEE_A) {
1060 led |= priv->led_ofdm_on;
1061 led &= priv->led_association_off;
1062 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 } else if (priv->assoc_network->mode == IEEE_G) {
1064 led |= priv->led_ofdm_on;
1065 led |= priv->led_association_on;
1066 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 } else {
1068 led &= priv->led_ofdm_off;
1069 led |= priv->led_association_on;
1070 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 }
1072
1073 led = ipw_register_toggle(led);
1074
1075 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077
1078 spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 unsigned long flags;
1084 u32 led;
1085
1086 /* Only nic type 1 supports mode LEDs */
1087 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 return;
1089
1090 spin_lock_irqsave(&priv->lock, flags);
1091
1092 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 led &= priv->led_ofdm_off;
1094 led &= priv->led_association_off;
1095
1096 led = ipw_register_toggle(led);
1097
1098 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100
1101 spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 ipw_led_link_on(priv);
1107 }
1108
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 ipw_led_activity_off(priv);
1112 ipw_led_link_off(priv);
1113 }
1114
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 /* Set the Link Led on for all nic types */
1118 ipw_led_link_on(priv);
1119 }
1120
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 ipw_led_activity_off(priv);
1124 ipw_led_link_off(priv);
1125
1126 if (priv->status & STATUS_RF_KILL_MASK)
1127 ipw_led_radio_off(priv);
1128 }
1129
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133
1134 /* Set the default PINs for the link and activity leds */
1135 priv->led_activity_on = IPW_ACTIVITY_LED;
1136 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137
1138 priv->led_association_on = IPW_ASSOCIATED_LED;
1139 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140
1141 /* Set the default PINs for the OFDM leds */
1142 priv->led_ofdm_on = IPW_OFDM_LED;
1143 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144
1145 switch (priv->nic_type) {
1146 case EEPROM_NIC_TYPE_1:
1147 /* In this NIC type, the LEDs are reversed.... */
1148 priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 priv->led_association_on = IPW_ACTIVITY_LED;
1151 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152
1153 if (!(priv->config & CFG_NO_LED))
1154 ipw_led_band_on(priv);
1155
1156 /* And we don't blink link LEDs for this nic, so
1157 * just return here */
1158 return;
1159
1160 case EEPROM_NIC_TYPE_3:
1161 case EEPROM_NIC_TYPE_2:
1162 case EEPROM_NIC_TYPE_4:
1163 case EEPROM_NIC_TYPE_0:
1164 break;
1165
1166 default:
1167 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 priv->nic_type);
1169 priv->nic_type = EEPROM_NIC_TYPE_0;
1170 break;
1171 }
1172
1173 if (!(priv->config & CFG_NO_LED)) {
1174 if (priv->status & STATUS_ASSOCIATED)
1175 ipw_led_link_on(priv);
1176 else
1177 ipw_led_link_off(priv);
1178 }
1179 }
1180
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 ipw_led_activity_off(priv);
1184 ipw_led_link_off(priv);
1185 ipw_led_band_off(priv);
1186 cancel_delayed_work(&priv->led_link_on);
1187 cancel_delayed_work(&priv->led_link_off);
1188 cancel_delayed_work(&priv->led_act_off);
1189 }
1190
1191 /*
1192 * The following adds a new attribute to the sysfs representation
1193 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194 * used for controlling the debug level.
1195 *
1196 * See the level definitions in ipw for details.
1197 */
1198 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199 {
1200 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202
1203 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204 size_t count)
1205 {
1206 char *p = (char *)buf;
1207 u32 val;
1208
1209 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 p++;
1211 if (p[0] == 'x' || p[0] == 'X')
1212 p++;
1213 val = simple_strtoul(p, &p, 16);
1214 } else
1215 val = simple_strtoul(p, &p, 10);
1216 if (p == buf)
1217 printk(KERN_INFO DRV_NAME
1218 ": %s is not in hex or decimal form.\n", buf);
1219 else
1220 ipw_debug_level = val;
1221
1222 return strnlen(buf, count);
1223 }
1224
1225 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226 show_debug_level, store_debug_level);
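/*
 * Usage sketch (editorial; the exact sysfs path depends on DRV_NAME and the
 * running kernel): the attribute accepts decimal or 0x-prefixed hex, so
 * something like
 *
 *	echo 0x43fff > /sys/bus/pci/drivers/ipw2200/debug_level
 *	cat /sys/bus/pci/drivers/ipw2200/debug_level
 *
 * adjusts ipw_debug_level at run time; see the level definitions mentioned
 * in the comment above for the meaning of each bit.
 */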
1227
1228 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229 {
1230 /* length = 1st dword in log */
1231 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232 }
1233
1234 static void ipw_capture_event_log(struct ipw_priv *priv,
1235 u32 log_len, struct ipw_event *log)
1236 {
1237 u32 base;
1238
1239 if (log_len) {
1240 base = ipw_read32(priv, IPW_EVENT_LOG);
1241 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242 (u8 *) log, sizeof(*log) * log_len);
1243 }
1244 }
1245
1246 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247 {
1248 struct ipw_fw_error *error;
1249 u32 log_len = ipw_get_event_log_len(priv);
1250 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251 u32 elem_len = ipw_read_reg32(priv, base);
1252
1253 error = kmalloc(sizeof(*error) +
1254 sizeof(*error->elem) * elem_len +
1255 sizeof(*error->log) * log_len, GFP_ATOMIC);
1256 if (!error) {
1257 IPW_ERROR("Memory allocation for firmware error log "
1258 "failed.\n");
1259 return NULL;
1260 }
1261 error->jiffies = jiffies;
1262 error->status = priv->status;
1263 error->config = priv->config;
1264 error->elem_len = elem_len;
1265 error->log_len = log_len;
1266 error->elem = (struct ipw_error_elem *)error->payload;
1267 error->log = (struct ipw_event *)(error->elem + elem_len);
1268
1269 ipw_capture_event_log(priv, log_len, error->log);
1270
1271 if (elem_len)
1272 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273 sizeof(*error->elem) * elem_len);
1274
1275 return error;
1276 }
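/*
 * Layout note (editorial): the single allocation above packs the header,
 * the element array and the event log back to back, i.e.
 *   [struct ipw_fw_error][elem_len * struct ipw_error_elem][log_len * struct ipw_event]
 * with error->elem and error->log pointing into the trailing payload.
 */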
1277
1278 static ssize_t show_event_log(struct device *d,
1279 struct device_attribute *attr, char *buf)
1280 {
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 u32 log_len = ipw_get_event_log_len(priv);
1283 u32 log_size;
1284 struct ipw_event *log;
1285 u32 len = 0, i;
1286
1287 /* not using min() because of its strict type checking */
1288 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289 sizeof(*log) * log_len : PAGE_SIZE;
1290 log = kzalloc(log_size, GFP_KERNEL);
1291 if (!log) {
1292 IPW_ERROR("Unable to allocate memory for log\n");
1293 return 0;
1294 }
1295 log_len = log_size / sizeof(*log);
1296 ipw_capture_event_log(priv, log_len, log);
1297
1298 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299 for (i = 0; i < log_len; i++)
1300 len += snprintf(buf + len, PAGE_SIZE - len,
1301 "\n%08X%08X%08X",
1302 log[i].time, log[i].event, log[i].data);
1303 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304 kfree(log);
1305 return len;
1306 }
1307
1308 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
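/*
 * Editorial note on the dump format produced above: the first token is the
 * number of captured entries as "%08X", followed by one "%08X%08X%08X" line
 * per entry holding time, event and data, and a trailing newline.
 */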
1309
1310 static ssize_t show_error(struct device *d,
1311 struct device_attribute *attr, char *buf)
1312 {
1313 struct ipw_priv *priv = dev_get_drvdata(d);
1314 u32 len = 0, i;
1315 if (!priv->error)
1316 return 0;
1317 len += snprintf(buf + len, PAGE_SIZE - len,
1318 "%08lX%08X%08X%08X",
1319 priv->error->jiffies,
1320 priv->error->status,
1321 priv->error->config, priv->error->elem_len);
1322 for (i = 0; i < priv->error->elem_len; i++)
1323 len += snprintf(buf + len, PAGE_SIZE - len,
1324 "\n%08X%08X%08X%08X%08X%08X%08X",
1325 priv->error->elem[i].time,
1326 priv->error->elem[i].desc,
1327 priv->error->elem[i].blink1,
1328 priv->error->elem[i].blink2,
1329 priv->error->elem[i].link1,
1330 priv->error->elem[i].link2,
1331 priv->error->elem[i].data);
1332
1333 len += snprintf(buf + len, PAGE_SIZE - len,
1334 "\n%08X", priv->error->log_len);
1335 for (i = 0; i < priv->error->log_len; i++)
1336 len += snprintf(buf + len, PAGE_SIZE - len,
1337 "\n%08X%08X%08X",
1338 priv->error->log[i].time,
1339 priv->error->log[i].event,
1340 priv->error->log[i].data);
1341 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342 return len;
1343 }
1344
1345 static ssize_t clear_error(struct device *d,
1346 struct device_attribute *attr,
1347 const char *buf, size_t count)
1348 {
1349 struct ipw_priv *priv = dev_get_drvdata(d);
1350
1351 kfree(priv->error);
1352 priv->error = NULL;
1353 return count;
1354 }
1355
1356 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357
1358 static ssize_t show_cmd_log(struct device *d,
1359 struct device_attribute *attr, char *buf)
1360 {
1361 struct ipw_priv *priv = dev_get_drvdata(d);
1362 u32 len = 0, i;
1363 if (!priv->cmdlog)
1364 return 0;
1365 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1367 i = (i + 1) % priv->cmdlog_len) {
1368 len +=
1369 snprintf(buf + len, PAGE_SIZE - len,
1370 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372 priv->cmdlog[i].cmd.len);
1373 len +=
1374 snprintk_buf(buf + len, PAGE_SIZE - len,
1375 (u8 *) priv->cmdlog[i].cmd.param,
1376 priv->cmdlog[i].cmd.len);
1377 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 }
1379 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380 return len;
1381 }
1382
1383 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384
1385 #ifdef CONFIG_IPW2200_PROMISCUOUS
1386 static void ipw_prom_free(struct ipw_priv *priv);
1387 static int ipw_prom_alloc(struct ipw_priv *priv);
1388 static ssize_t store_rtap_iface(struct device *d,
1389 struct device_attribute *attr,
1390 const char *buf, size_t count)
1391 {
1392 struct ipw_priv *priv = dev_get_drvdata(d);
1393 int rc = 0;
1394
1395 if (count < 1)
1396 return -EINVAL;
1397
1398 switch (buf[0]) {
1399 case '0':
1400 if (!rtap_iface)
1401 return count;
1402
1403 if (netif_running(priv->prom_net_dev)) {
1404 IPW_WARNING("Interface is up. Cannot unregister.\n");
1405 return count;
1406 }
1407
1408 ipw_prom_free(priv);
1409 rtap_iface = 0;
1410 break;
1411
1412 case '1':
1413 if (rtap_iface)
1414 return count;
1415
1416 rc = ipw_prom_alloc(priv);
1417 if (!rc)
1418 rtap_iface = 1;
1419 break;
1420
1421 default:
1422 return -EINVAL;
1423 }
1424
1425 if (rc) {
1426 IPW_ERROR("Failed to register promiscuous network "
1427 "device (error %d).\n", rc);
1428 }
1429
1430 return count;
1431 }
1432
1433 static ssize_t show_rtap_iface(struct device *d,
1434 struct device_attribute *attr,
1435 char *buf)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 if (rtap_iface)
1439 return sprintf(buf, "%s", priv->prom_net_dev->name);
1440 else {
1441 buf[0] = '-';
1442 buf[1] = '1';
1443 buf[2] = '\0';
1444 return 3;
1445 }
1446 }
1447
1448 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449 store_rtap_iface);
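/*
 * Editorial note: writing '1' to this per-device attribute allocates and
 * registers the radiotap monitoring interface, while '0' tears it down
 * again (refused while the interface is up); reading returns the interface
 * name, or "-1" when no rtap interface exists.
 */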
1450
1451 static ssize_t store_rtap_filter(struct device *d,
1452 struct device_attribute *attr,
1453 const char *buf, size_t count)
1454 {
1455 struct ipw_priv *priv = dev_get_drvdata(d);
1456
1457 if (!priv->prom_priv) {
1458 IPW_ERROR("Attempting to set filter without "
1459 "rtap_iface enabled.\n");
1460 return -EPERM;
1461 }
1462
1463 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464
1465 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466 BIT_ARG16(priv->prom_priv->filter));
1467
1468 return count;
1469 }
1470
1471 static ssize_t show_rtap_filter(struct device *d,
1472 struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "0x%04X",
1477 priv->prom_priv ? priv->prom_priv->filter : 0);
1478 }
1479
1480 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481 store_rtap_filter);
1482 #endif
1483
1484 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485 char *buf)
1486 {
1487 struct ipw_priv *priv = dev_get_drvdata(d);
1488 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489 }
1490
1491 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492 const char *buf, size_t count)
1493 {
1494 struct ipw_priv *priv = dev_get_drvdata(d);
1495 struct net_device *dev = priv->net_dev;
1496 char buffer[] = "00000000";
1497 unsigned long len =
1498 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499 unsigned long val;
1500 char *p = buffer;
1501
1502 IPW_DEBUG_INFO("enter\n");
1503
1504 strncpy(buffer, buf, len);
1505 buffer[len] = 0;
1506
1507 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508 p++;
1509 if (p[0] == 'x' || p[0] == 'X')
1510 p++;
1511 val = simple_strtoul(p, &p, 16);
1512 } else
1513 val = simple_strtoul(p, &p, 10);
1514 if (p == buffer) {
1515 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516 } else {
1517 priv->ieee->scan_age = val;
1518 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519 }
1520
1521 IPW_DEBUG_INFO("exit\n");
1522 return len;
1523 }
1524
1525 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526
1527 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528 char *buf)
1529 {
1530 struct ipw_priv *priv = dev_get_drvdata(d);
1531 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532 }
1533
1534 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535 const char *buf, size_t count)
1536 {
1537 struct ipw_priv *priv = dev_get_drvdata(d);
1538
1539 IPW_DEBUG_INFO("enter\n");
1540
1541 if (count == 0)
1542 return 0;
1543
1544 if (*buf == 0) {
1545 IPW_DEBUG_LED("Disabling LED control.\n");
1546 priv->config |= CFG_NO_LED;
1547 ipw_led_shutdown(priv);
1548 } else {
1549 IPW_DEBUG_LED("Enabling LED control.\n");
1550 priv->config &= ~CFG_NO_LED;
1551 ipw_led_init(priv);
1552 }
1553
1554 IPW_DEBUG_INFO("exit\n");
1555 return count;
1556 }
1557
1558 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559
1560 static ssize_t show_status(struct device *d,
1561 struct device_attribute *attr, char *buf)
1562 {
1563 struct ipw_priv *p = dev_get_drvdata(d);
1564 return sprintf(buf, "0x%08x\n", (int)p->status);
1565 }
1566
1567 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568
1569 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570 char *buf)
1571 {
1572 struct ipw_priv *p = dev_get_drvdata(d);
1573 return sprintf(buf, "0x%08x\n", (int)p->config);
1574 }
1575
1576 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577
1578 static ssize_t show_nic_type(struct device *d,
1579 struct device_attribute *attr, char *buf)
1580 {
1581 struct ipw_priv *priv = dev_get_drvdata(d);
1582 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583 }
1584
1585 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586
1587 static ssize_t show_ucode_version(struct device *d,
1588 struct device_attribute *attr, char *buf)
1589 {
1590 u32 len = sizeof(u32), tmp = 0;
1591 struct ipw_priv *p = dev_get_drvdata(d);
1592
1593 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594 return 0;
1595
1596 return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598
1599 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600
1601 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 u32 len = sizeof(u32), tmp = 0;
1605 struct ipw_priv *p = dev_get_drvdata(d);
1606
1607 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608 return 0;
1609
1610 return sprintf(buf, "0x%08x\n", tmp);
1611 }
1612
1613 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614
1615 /*
1616 * Add a device attribute to view/control the delay between eeprom
1617 * operations.
1618 */
1619 static ssize_t show_eeprom_delay(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621 {
1622 struct ipw_priv *p = dev_get_drvdata(d);
1623 int n = p->eeprom_delay;
1624 return sprintf(buf, "%i\n", n);
1625 }
1626 static ssize_t store_eeprom_delay(struct device *d,
1627 struct device_attribute *attr,
1628 const char *buf, size_t count)
1629 {
1630 struct ipw_priv *p = dev_get_drvdata(d);
1631 sscanf(buf, "%i", &p->eeprom_delay);
1632 return strnlen(buf, count);
1633 }
1634
1635 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636 show_eeprom_delay, store_eeprom_delay);
1637
1638 static ssize_t show_command_event_reg(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640 {
1641 u32 reg = 0;
1642 struct ipw_priv *p = dev_get_drvdata(d);
1643
1644 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645 return sprintf(buf, "0x%08x\n", reg);
1646 }
1647 static ssize_t store_command_event_reg(struct device *d,
1648 struct device_attribute *attr,
1649 const char *buf, size_t count)
1650 {
1651 u32 reg;
1652 struct ipw_priv *p = dev_get_drvdata(d);
1653
1654 sscanf(buf, "%x", &reg);
1655 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656 return strnlen(buf, count);
1657 }
1658
1659 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660 show_command_event_reg, store_command_event_reg);
1661
1662 static ssize_t show_mem_gpio_reg(struct device *d,
1663 struct device_attribute *attr, char *buf)
1664 {
1665 u32 reg = 0;
1666 struct ipw_priv *p = dev_get_drvdata(d);
1667
1668 reg = ipw_read_reg32(p, 0x301100);
1669 return sprintf(buf, "0x%08x\n", reg);
1670 }
1671 static ssize_t store_mem_gpio_reg(struct device *d,
1672 struct device_attribute *attr,
1673 const char *buf, size_t count)
1674 {
1675 u32 reg;
1676 struct ipw_priv *p = dev_get_drvdata(d);
1677
1678 sscanf(buf, "%x", &reg);
1679 ipw_write_reg32(p, 0x301100, reg);
1680 return strnlen(buf, count);
1681 }
1682
1683 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684 show_mem_gpio_reg, store_mem_gpio_reg);
1685
1686 static ssize_t show_indirect_dword(struct device *d,
1687 struct device_attribute *attr, char *buf)
1688 {
1689 u32 reg = 0;
1690 struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692 if (priv->status & STATUS_INDIRECT_DWORD)
1693 reg = ipw_read_reg32(priv, priv->indirect_dword);
1694 else
1695 reg = 0;
1696
1697 return sprintf(buf, "0x%08x\n", reg);
1698 }
1699 static ssize_t store_indirect_dword(struct device *d,
1700 struct device_attribute *attr,
1701 const char *buf, size_t count)
1702 {
1703 struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705 sscanf(buf, "%x", &priv->indirect_dword);
1706 priv->status |= STATUS_INDIRECT_DWORD;
1707 return strnlen(buf, count);
1708 }
1709
1710 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711 show_indirect_dword, store_indirect_dword);
1712
1713 static ssize_t show_indirect_byte(struct device *d,
1714 struct device_attribute *attr, char *buf)
1715 {
1716 u8 reg = 0;
1717 struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719 if (priv->status & STATUS_INDIRECT_BYTE)
1720 reg = ipw_read_reg8(priv, priv->indirect_byte);
1721 else
1722 reg = 0;
1723
1724 return sprintf(buf, "0x%02x\n", reg);
1725 }
1726 static ssize_t store_indirect_byte(struct device *d,
1727 struct device_attribute *attr,
1728 const char *buf, size_t count)
1729 {
1730 struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732 sscanf(buf, "%x", &priv->indirect_byte);
1733 priv->status |= STATUS_INDIRECT_BYTE;
1734 return strnlen(buf, count);
1735 }
1736
1737 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738 show_indirect_byte, store_indirect_byte);
1739
1740 static ssize_t show_direct_dword(struct device *d,
1741 struct device_attribute *attr, char *buf)
1742 {
1743 u32 reg = 0;
1744 struct ipw_priv *priv = dev_get_drvdata(d);
1745
1746 if (priv->status & STATUS_DIRECT_DWORD)
1747 reg = ipw_read32(priv, priv->direct_dword);
1748 else
1749 reg = 0;
1750
1751 return sprintf(buf, "0x%08x\n", reg);
1752 }
1753 static ssize_t store_direct_dword(struct device *d,
1754 struct device_attribute *attr,
1755 const char *buf, size_t count)
1756 {
1757 struct ipw_priv *priv = dev_get_drvdata(d);
1758
1759 sscanf(buf, "%x", &priv->direct_dword);
1760 priv->status |= STATUS_DIRECT_DWORD;
1761 return strnlen(buf, count);
1762 }
1763
1764 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765 show_direct_dword, store_direct_dword);
1766
1767 static int rf_kill_active(struct ipw_priv *priv)
1768 {
1769 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770 priv->status |= STATUS_RF_KILL_HW;
1771 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772 } else {
1773 priv->status &= ~STATUS_RF_KILL_HW;
1774 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775 }
1776
1777 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778 }
1779
1780 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781 char *buf)
1782 {
1783 /* 0 - RF kill not enabled
1784 1 - SW based RF kill active (sysfs)
1785 2 - HW based RF kill active
1786 3 - Both HW and SW based RF kill active */
1787 struct ipw_priv *priv = dev_get_drvdata(d);
1788 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789 (rf_kill_active(priv) ? 0x2 : 0x0);
1790 return sprintf(buf, "%i\n", val);
1791 }
1792
1793 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794 {
1795 if ((disable_radio ? 1 : 0) ==
1796 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797 return 0;
1798
1799 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1800 disable_radio ? "OFF" : "ON");
1801
1802 if (disable_radio) {
1803 priv->status |= STATUS_RF_KILL_SW;
1804
1805 cancel_delayed_work(&priv->request_scan);
1806 cancel_delayed_work(&priv->request_direct_scan);
1807 cancel_delayed_work(&priv->request_passive_scan);
1808 cancel_delayed_work(&priv->scan_event);
1809 schedule_work(&priv->down);
1810 } else {
1811 priv->status &= ~STATUS_RF_KILL_SW;
1812 if (rf_kill_active(priv)) {
1813 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814 "disabled by HW switch\n");
1815 /* Make sure the RF_KILL check timer is running */
1816 cancel_delayed_work(&priv->rf_kill);
1817 schedule_delayed_work(&priv->rf_kill,
1818 round_jiffies_relative(2 * HZ));
1819 } else
1820 schedule_work(&priv->up);
1821 }
1822
1823 return 1;
1824 }
1825
1826 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827 const char *buf, size_t count)
1828 {
1829 struct ipw_priv *priv = dev_get_drvdata(d);
1830
1831 ipw_radio_kill_sw(priv, buf[0] == '1');
1832
1833 return count;
1834 }
1835
1836 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
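/*
 * Usage sketch (editorial): writing '1' engages the software RF kill and
 * schedules the down worker, any other first character releases it; the
 * value read back combines bit 0 (SW kill) and bit 1 (HW switch), matching
 * the 0-3 legend in show_rf_kill() above.
 */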
1837
1838 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839 char *buf)
1840 {
1841 struct ipw_priv *priv = dev_get_drvdata(d);
1842 int pos = 0, len = 0;
1843 if (priv->config & CFG_SPEED_SCAN) {
1844 while (priv->speed_scan[pos] != 0)
1845 len += sprintf(&buf[len], "%d ",
1846 priv->speed_scan[pos++]);
1847 return len + sprintf(&buf[len], "\n");
1848 }
1849
1850 return sprintf(buf, "0\n");
1851 }
1852
1853 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854 const char *buf, size_t count)
1855 {
1856 struct ipw_priv *priv = dev_get_drvdata(d);
1857 int channel, pos = 0;
1858 const char *p = buf;
1859
1860 /* list of space separated channels to scan, optionally ending with 0 */
1861 while ((channel = simple_strtol(p, NULL, 0))) {
1862 if (pos == MAX_SPEED_SCAN - 1) {
1863 priv->speed_scan[pos] = 0;
1864 break;
1865 }
1866
1867 if (libipw_is_valid_channel(priv->ieee, channel))
1868 priv->speed_scan[pos++] = channel;
1869 else
1870 IPW_WARNING("Skipping invalid channel request: %d\n",
1871 channel);
1872 p = strchr(p, ' ');
1873 if (!p)
1874 break;
1875 while (*p == ' ' || *p == '\t')
1876 p++;
1877 }
1878
1879 if (pos == 0)
1880 priv->config &= ~CFG_SPEED_SCAN;
1881 else {
1882 priv->speed_scan_pos = 0;
1883 priv->config |= CFG_SPEED_SCAN;
1884 }
1885
1886 return count;
1887 }
1888
1889 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890 store_speed_scan);
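/* A brief usage sketch for the speed_scan attribute (path illustrative, as
 * above):
 *
 *   echo "1 6 11 0" > /sys/bus/pci/devices/0000:02:00.0/speed_scan  - scan only channels 1, 6 and 11
 *   echo 0 > /sys/bus/pci/devices/0000:02:00.0/speed_scan           - disable the channel list (clears CFG_SPEED_SCAN)
 *   cat /sys/bus/pci/devices/0000:02:00.0/speed_scan                - prints the active list, or "0"
 *
 * Channels that fail libipw_is_valid_channel() are skipped with a warning.
 */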
1891
1892 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893 char *buf)
1894 {
1895 struct ipw_priv *priv = dev_get_drvdata(d);
1896 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897 }
1898
1899 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900 const char *buf, size_t count)
1901 {
1902 struct ipw_priv *priv = dev_get_drvdata(d);
1903 if (buf[0] == '1')
1904 priv->config |= CFG_NET_STATS;
1905 else
1906 priv->config &= ~CFG_NET_STATS;
1907
1908 return count;
1909 }
1910
1911 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912 show_net_stats, store_net_stats);
1913
1914 static ssize_t show_channels(struct device *d,
1915 struct device_attribute *attr,
1916 char *buf)
1917 {
1918 struct ipw_priv *priv = dev_get_drvdata(d);
1919 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920 int len = 0, i;
1921
1922 len = sprintf(&buf[len],
1923                       "Displaying %d channels in 2.4GHz band "
1924 "(802.11bg):\n", geo->bg_channels);
1925
1926 for (i = 0; i < geo->bg_channels; i++) {
1927 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928 geo->bg[i].channel,
1929 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930 " (radar spectrum)" : "",
1931 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933 ? "" : ", IBSS",
1934 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935 "passive only" : "active/passive",
1936 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937 "B" : "B/G");
1938 }
1939
1940 len += sprintf(&buf[len],
1941                        "Displaying %d channels in 5.2GHz band "
1942 "(802.11a):\n", geo->a_channels);
1943 for (i = 0; i < geo->a_channels; i++) {
1944 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945 geo->a[i].channel,
1946 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947 " (radar spectrum)" : "",
1948 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950 ? "" : ", IBSS",
1951 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952 "passive only" : "active/passive");
1953 }
1954
1955 return len;
1956 }
1957
1958 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959
1960 static void notify_wx_assoc_event(struct ipw_priv *priv)
1961 {
1962 union iwreq_data wrqu;
1963 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964 if (priv->status & STATUS_ASSOCIATED)
1965 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966 else
1967 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1968 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969 }
1970
1971 static void ipw_irq_tasklet(struct ipw_priv *priv)
1972 {
1973 u32 inta, inta_mask, handled = 0;
1974 unsigned long flags;
1975 int rc = 0;
1976
1977 spin_lock_irqsave(&priv->irq_lock, flags);
1978
1979 inta = ipw_read32(priv, IPW_INTA_RW);
1980 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1981
1982 if (inta == 0xFFFFFFFF) {
1983 /* Hardware disappeared */
1984 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1985 /* Only handle the cached INTA values */
1986 inta = 0;
1987 }
1988 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1989
1990 /* Add any cached INTA values that need to be handled */
1991 inta |= priv->isr_inta;
1992
1993 spin_unlock_irqrestore(&priv->irq_lock, flags);
1994
1995 spin_lock_irqsave(&priv->lock, flags);
1996
1997         /* handle all of the causes of the interrupt */
1998 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1999 ipw_rx(priv);
2000 handled |= IPW_INTA_BIT_RX_TRANSFER;
2001 }
2002
2003 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2004 IPW_DEBUG_HC("Command completed.\n");
2005 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2006 priv->status &= ~STATUS_HCMD_ACTIVE;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2009 }
2010
2011 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2012 IPW_DEBUG_TX("TX_QUEUE_1\n");
2013 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2014 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2015 }
2016
2017 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2018 IPW_DEBUG_TX("TX_QUEUE_2\n");
2019 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2020 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2021 }
2022
2023 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2024 IPW_DEBUG_TX("TX_QUEUE_3\n");
2025 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2026 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2027 }
2028
2029 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2030 IPW_DEBUG_TX("TX_QUEUE_4\n");
2031 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2032 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2033 }
2034
2035 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2036 IPW_WARNING("STATUS_CHANGE\n");
2037 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2041 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2042 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2043 }
2044
2045 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2046 IPW_WARNING("HOST_CMD_DONE\n");
2047 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2048 }
2049
2050 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2051 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2052 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2053 }
2054
2055 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2056 IPW_WARNING("PHY_OFF_DONE\n");
2057 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2058 }
2059
2060 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2061 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2062 priv->status |= STATUS_RF_KILL_HW;
2063 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2064 wake_up_interruptible(&priv->wait_command_queue);
2065 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2066 cancel_delayed_work(&priv->request_scan);
2067 cancel_delayed_work(&priv->request_direct_scan);
2068 cancel_delayed_work(&priv->request_passive_scan);
2069 cancel_delayed_work(&priv->scan_event);
2070 schedule_work(&priv->link_down);
2071 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2072 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2073 }
2074
2075 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2076 IPW_WARNING("Firmware error detected. Restarting.\n");
2077 if (priv->error) {
2078 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2079 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2080 struct ipw_fw_error *error =
2081 ipw_alloc_error_log(priv);
2082 ipw_dump_error_log(priv, error);
2083 kfree(error);
2084 }
2085 } else {
2086 priv->error = ipw_alloc_error_log(priv);
2087 if (priv->error)
2088 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2089 else
2090 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2091 "log.\n");
2092 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2093 ipw_dump_error_log(priv, priv->error);
2094 }
2095
2096 /* XXX: If hardware encryption is for WPA/WPA2,
2097 * we have to notify the supplicant. */
2098 if (priv->ieee->sec.encrypt) {
2099 priv->status &= ~STATUS_ASSOCIATED;
2100 notify_wx_assoc_event(priv);
2101 }
2102
2103 /* Keep the restart process from trying to send host
2104 * commands by clearing the INIT status bit */
2105 priv->status &= ~STATUS_INIT;
2106
2107 /* Cancel currently queued command. */
2108 priv->status &= ~STATUS_HCMD_ACTIVE;
2109 wake_up_interruptible(&priv->wait_command_queue);
2110
2111 schedule_work(&priv->adapter_restart);
2112 handled |= IPW_INTA_BIT_FATAL_ERROR;
2113 }
2114
2115 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2116 IPW_ERROR("Parity error\n");
2117 handled |= IPW_INTA_BIT_PARITY_ERROR;
2118 }
2119
2120 if (handled != inta) {
2121 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2122 }
2123
2124 spin_unlock_irqrestore(&priv->lock, flags);
2125
2126 /* enable all interrupts */
2127 ipw_enable_interrupts(priv);
2128 }
2129
2130 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2131 static char *get_cmd_string(u8 cmd)
2132 {
2133 switch (cmd) {
2134 IPW_CMD(HOST_COMPLETE);
2135 IPW_CMD(POWER_DOWN);
2136 IPW_CMD(SYSTEM_CONFIG);
2137 IPW_CMD(MULTICAST_ADDRESS);
2138 IPW_CMD(SSID);
2139 IPW_CMD(ADAPTER_ADDRESS);
2140 IPW_CMD(PORT_TYPE);
2141 IPW_CMD(RTS_THRESHOLD);
2142 IPW_CMD(FRAG_THRESHOLD);
2143 IPW_CMD(POWER_MODE);
2144 IPW_CMD(WEP_KEY);
2145 IPW_CMD(TGI_TX_KEY);
2146 IPW_CMD(SCAN_REQUEST);
2147 IPW_CMD(SCAN_REQUEST_EXT);
2148 IPW_CMD(ASSOCIATE);
2149 IPW_CMD(SUPPORTED_RATES);
2150 IPW_CMD(SCAN_ABORT);
2151 IPW_CMD(TX_FLUSH);
2152 IPW_CMD(QOS_PARAMETERS);
2153 IPW_CMD(DINO_CONFIG);
2154 IPW_CMD(RSN_CAPABILITIES);
2155 IPW_CMD(RX_KEY);
2156 IPW_CMD(CARD_DISABLE);
2157 IPW_CMD(SEED_NUMBER);
2158 IPW_CMD(TX_POWER);
2159 IPW_CMD(COUNTRY_INFO);
2160 IPW_CMD(AIRONET_INFO);
2161 IPW_CMD(AP_TX_POWER);
2162 IPW_CMD(CCKM_INFO);
2163 IPW_CMD(CCX_VER_INFO);
2164 IPW_CMD(SET_CALIBRATION);
2165 IPW_CMD(SENSITIVITY_CALIB);
2166 IPW_CMD(RETRY_LIMIT);
2167 IPW_CMD(IPW_PRE_POWER_DOWN);
2168 IPW_CMD(VAP_BEACON_TEMPLATE);
2169 IPW_CMD(VAP_DTIM_PERIOD);
2170 IPW_CMD(EXT_SUPPORTED_RATES);
2171 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2172 IPW_CMD(VAP_QUIET_INTERVALS);
2173 IPW_CMD(VAP_CHANNEL_SWITCH);
2174 IPW_CMD(VAP_MANDATORY_CHANNELS);
2175 IPW_CMD(VAP_CELL_PWR_LIMIT);
2176 IPW_CMD(VAP_CF_PARAM_SET);
2177 IPW_CMD(VAP_SET_BEACONING_STATE);
2178 IPW_CMD(MEASUREMENT);
2179 IPW_CMD(POWER_CAPABILITY);
2180 IPW_CMD(SUPPORTED_CHANNELS);
2181 IPW_CMD(TPC_REPORT);
2182 IPW_CMD(WME_INFO);
2183 IPW_CMD(PRODUCTION_COMMAND);
2184 default:
2185 return "UNKNOWN";
2186 }
2187 }
2188
2189 #define HOST_COMPLETE_TIMEOUT HZ
2190
2191 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2192 {
2193 int rc = 0;
2194 unsigned long flags;
2195 unsigned long now, end;
2196
2197 spin_lock_irqsave(&priv->lock, flags);
2198 if (priv->status & STATUS_HCMD_ACTIVE) {
2199 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2200 get_cmd_string(cmd->cmd));
2201 spin_unlock_irqrestore(&priv->lock, flags);
2202 return -EAGAIN;
2203 }
2204
2205 priv->status |= STATUS_HCMD_ACTIVE;
2206
2207 if (priv->cmdlog) {
2208 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2209 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2210 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2211 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2212 cmd->len);
2213 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2214 }
2215
2216 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2217 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2218 priv->status);
2219
2220 #ifndef DEBUG_CMD_WEP_KEY
2221 if (cmd->cmd == IPW_CMD_WEP_KEY)
2222                 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2223 else
2224 #endif
2225 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2226
2227 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2228 if (rc) {
2229 priv->status &= ~STATUS_HCMD_ACTIVE;
2230 IPW_ERROR("Failed to send %s: Reason %d\n",
2231 get_cmd_string(cmd->cmd), rc);
2232 spin_unlock_irqrestore(&priv->lock, flags);
2233 goto exit;
2234 }
2235 spin_unlock_irqrestore(&priv->lock, flags);
2236
2237 now = jiffies;
2238 end = now + HOST_COMPLETE_TIMEOUT;
2239 again:
2240 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2241 !(priv->
2242 status & STATUS_HCMD_ACTIVE),
2243 end - now);
2244 if (rc < 0) {
2245 now = jiffies;
2246 if (time_before(now, end))
2247 goto again;
2248 rc = 0;
2249 }
2250
2251 if (rc == 0) {
2252 spin_lock_irqsave(&priv->lock, flags);
2253 if (priv->status & STATUS_HCMD_ACTIVE) {
2254 IPW_ERROR("Failed to send %s: Command timed out.\n",
2255 get_cmd_string(cmd->cmd));
2256 priv->status &= ~STATUS_HCMD_ACTIVE;
2257 spin_unlock_irqrestore(&priv->lock, flags);
2258 rc = -EIO;
2259 goto exit;
2260 }
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262 } else
2263 rc = 0;
2264
2265 if (priv->status & STATUS_RF_KILL_HW) {
2266 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2267 get_cmd_string(cmd->cmd));
2268 rc = -EIO;
2269 goto exit;
2270 }
2271
2272 exit:
2273 if (priv->cmdlog) {
2274 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2275 priv->cmdlog_pos %= priv->cmdlog_len;
2276 }
2277 return rc;
2278 }
2279
2280 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2281 {
2282 struct host_cmd cmd = {
2283 .cmd = command,
2284 };
2285
2286 return __ipw_send_cmd(priv, &cmd);
2287 }
2288
2289 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2290 void *data)
2291 {
2292 struct host_cmd cmd = {
2293 .cmd = command,
2294 .len = len,
2295 .param = data,
2296 };
2297
2298 return __ipw_send_cmd(priv, &cmd);
2299 }
2300
2301 static int ipw_send_host_complete(struct ipw_priv *priv)
2302 {
2303 if (!priv) {
2304 IPW_ERROR("Invalid args\n");
2305 return -1;
2306 }
2307
2308 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2309 }
2310
2311 static int ipw_send_system_config(struct ipw_priv *priv)
2312 {
2313 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2314 sizeof(priv->sys_config),
2315 &priv->sys_config);
2316 }
2317
2318 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2319 {
2320 if (!priv || !ssid) {
2321 IPW_ERROR("Invalid args\n");
2322 return -1;
2323 }
2324
2325 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2326 ssid);
2327 }
2328
2329 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2330 {
2331 if (!priv || !mac) {
2332 IPW_ERROR("Invalid args\n");
2333 return -1;
2334 }
2335
2336 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2337 priv->net_dev->name, mac);
2338
2339 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2340 }
2341
2342 static void ipw_adapter_restart(void *adapter)
2343 {
2344 struct ipw_priv *priv = adapter;
2345
2346 if (priv->status & STATUS_RF_KILL_MASK)
2347 return;
2348
2349 ipw_down(priv);
2350
2351 if (priv->assoc_network &&
2352 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2353 ipw_remove_current_network(priv);
2354
2355 if (ipw_up(priv)) {
2356 IPW_ERROR("Failed to up device\n");
2357 return;
2358 }
2359 }
2360
2361 static void ipw_bg_adapter_restart(struct work_struct *work)
2362 {
2363 struct ipw_priv *priv =
2364 container_of(work, struct ipw_priv, adapter_restart);
2365 mutex_lock(&priv->mutex);
2366 ipw_adapter_restart(priv);
2367 mutex_unlock(&priv->mutex);
2368 }
2369
2370 static void ipw_abort_scan(struct ipw_priv *priv);
2371
2372 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2373
2374 static void ipw_scan_check(void *data)
2375 {
2376 struct ipw_priv *priv = data;
2377
2378 if (priv->status & STATUS_SCAN_ABORTING) {
2379 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2380 "adapter after (%dms).\n",
2381 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2382 schedule_work(&priv->adapter_restart);
2383 } else if (priv->status & STATUS_SCANNING) {
2384 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2385 "after (%dms).\n",
2386 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2387 ipw_abort_scan(priv);
2388 schedule_delayed_work(&priv->scan_check, HZ);
2389 }
2390 }
2391
2392 static void ipw_bg_scan_check(struct work_struct *work)
2393 {
2394 struct ipw_priv *priv =
2395 container_of(work, struct ipw_priv, scan_check.work);
2396 mutex_lock(&priv->mutex);
2397 ipw_scan_check(priv);
2398 mutex_unlock(&priv->mutex);
2399 }
2400
2401 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2402 struct ipw_scan_request_ext *request)
2403 {
2404 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2405 sizeof(*request), request);
2406 }
2407
2408 static int ipw_send_scan_abort(struct ipw_priv *priv)
2409 {
2410 if (!priv) {
2411 IPW_ERROR("Invalid args\n");
2412 return -1;
2413 }
2414
2415 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2416 }
2417
2418 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2419 {
2420 struct ipw_sensitivity_calib calib = {
2421 .beacon_rssi_raw = cpu_to_le16(sens),
2422 };
2423
2424 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2425 &calib);
2426 }
2427
2428 static int ipw_send_associate(struct ipw_priv *priv,
2429 struct ipw_associate *associate)
2430 {
2431 if (!priv || !associate) {
2432 IPW_ERROR("Invalid args\n");
2433 return -1;
2434 }
2435
2436 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2437 associate);
2438 }
2439
2440 static int ipw_send_supported_rates(struct ipw_priv *priv,
2441 struct ipw_supported_rates *rates)
2442 {
2443 if (!priv || !rates) {
2444 IPW_ERROR("Invalid args\n");
2445 return -1;
2446 }
2447
2448 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2449 rates);
2450 }
2451
2452 static int ipw_set_random_seed(struct ipw_priv *priv)
2453 {
2454 u32 val;
2455
2456 if (!priv) {
2457 IPW_ERROR("Invalid args\n");
2458 return -1;
2459 }
2460
2461 get_random_bytes(&val, sizeof(val));
2462
2463 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2464 }
2465
2466 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2467 {
2468 __le32 v = cpu_to_le32(phy_off);
2469 if (!priv) {
2470 IPW_ERROR("Invalid args\n");
2471 return -1;
2472 }
2473
2474 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2475 }
2476
2477 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2478 {
2479 if (!priv || !power) {
2480 IPW_ERROR("Invalid args\n");
2481 return -1;
2482 }
2483
2484 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2485 }
2486
2487 static int ipw_set_tx_power(struct ipw_priv *priv)
2488 {
2489 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2490 struct ipw_tx_power tx_power;
2491 s8 max_power;
2492 int i;
2493
2494 memset(&tx_power, 0, sizeof(tx_power));
2495
2496 /* configure device for 'G' band */
2497 tx_power.ieee_mode = IPW_G_MODE;
2498 tx_power.num_channels = geo->bg_channels;
2499 for (i = 0; i < geo->bg_channels; i++) {
2500 max_power = geo->bg[i].max_power;
2501 tx_power.channels_tx_power[i].channel_number =
2502 geo->bg[i].channel;
2503 tx_power.channels_tx_power[i].tx_power = max_power ?
2504 min(max_power, priv->tx_power) : priv->tx_power;
2505 }
2506 if (ipw_send_tx_power(priv, &tx_power))
2507 return -EIO;
2508
2509 /* configure device to also handle 'B' band */
2510 tx_power.ieee_mode = IPW_B_MODE;
2511 if (ipw_send_tx_power(priv, &tx_power))
2512 return -EIO;
2513
2514 /* configure device to also handle 'A' band */
2515 if (priv->ieee->abg_true) {
2516 tx_power.ieee_mode = IPW_A_MODE;
2517 tx_power.num_channels = geo->a_channels;
2518 for (i = 0; i < tx_power.num_channels; i++) {
2519 max_power = geo->a[i].max_power;
2520 tx_power.channels_tx_power[i].channel_number =
2521 geo->a[i].channel;
2522 tx_power.channels_tx_power[i].tx_power = max_power ?
2523 min(max_power, priv->tx_power) : priv->tx_power;
2524 }
2525 if (ipw_send_tx_power(priv, &tx_power))
2526 return -EIO;
2527 }
2528 return 0;
2529 }
2530
2531 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2532 {
2533 struct ipw_rts_threshold rts_threshold = {
2534 .rts_threshold = cpu_to_le16(rts),
2535 };
2536
2537 if (!priv) {
2538 IPW_ERROR("Invalid args\n");
2539 return -1;
2540 }
2541
2542 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2543 sizeof(rts_threshold), &rts_threshold);
2544 }
2545
2546 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2547 {
2548 struct ipw_frag_threshold frag_threshold = {
2549 .frag_threshold = cpu_to_le16(frag),
2550 };
2551
2552 if (!priv) {
2553 IPW_ERROR("Invalid args\n");
2554 return -1;
2555 }
2556
2557 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2558 sizeof(frag_threshold), &frag_threshold);
2559 }
2560
2561 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2562 {
2563 __le32 param;
2564
2565 if (!priv) {
2566 IPW_ERROR("Invalid args\n");
2567 return -1;
2568 }
2569
2570         /* If on battery, set power index 3; if on AC, set CAM;
2571          * otherwise use the user-supplied level */
2572 switch (mode) {
2573 case IPW_POWER_BATTERY:
2574 param = cpu_to_le32(IPW_POWER_INDEX_3);
2575 break;
2576 case IPW_POWER_AC:
2577 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2578 break;
2579 default:
2580 param = cpu_to_le32(mode);
2581 break;
2582 }
2583
2584 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2585 &param);
2586 }
2587
2588 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2589 {
2590 struct ipw_retry_limit retry_limit = {
2591 .short_retry_limit = slimit,
2592 .long_retry_limit = llimit
2593 };
2594
2595 if (!priv) {
2596 IPW_ERROR("Invalid args\n");
2597 return -1;
2598 }
2599
2600 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2601 &retry_limit);
2602 }
2603
2604 /*
2605 * The IPW device contains a Microwire compatible EEPROM that stores
2606 * various data like the MAC address. Usually the firmware has exclusive
2607 * access to the eeprom, but during device initialization (before the
2608 * device driver has sent the HostComplete command to the firmware) the
2609 * device driver has read access to the EEPROM by way of indirect addressing
2610 * through a couple of memory mapped registers.
2611 *
2612 * The following is a simplified implementation for pulling data out of the
2613  * eeprom, along with some helper functions to find information in
2614 * the per device private data's copy of the eeprom.
2615 *
2616  * NOTE: To better understand how these functions work (i.e. what is a chip
2617  * select and why do we have to keep driving the eeprom clock?), read
2618 * just about any data sheet for a Microwire compatible EEPROM.
2619 */
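/* Putting the helpers below together, a single 16-bit read as performed by
 * eeprom_read_u16() proceeds roughly as follows:
 *
 *   1. eeprom_cs()                      - assert the chip select and clock it once
 *   2. eeprom_op(EEPROM_CMD_READ, addr) - clock out a start bit (1), the two
 *                                         opcode bits and the 8 address bits, MSB first
 *   3. one dummy clock, then 16 clock cycles, sampling EEPROM_BIT_DO after each
 *      to assemble the data word, MSB first
 *   4. eeprom_disable_cs()              - release the chip select
 */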
2620
2621 /* write a 32 bit value into the indirect accessor register */
2622 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2623 {
2624 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2625
2626 /* the eeprom requires some time to complete the operation */
2627 udelay(p->eeprom_delay);
2628 }
2629
2630 /* perform a chip select operation */
2631 static void eeprom_cs(struct ipw_priv *priv)
2632 {
2633 eeprom_write_reg(priv, 0);
2634 eeprom_write_reg(priv, EEPROM_BIT_CS);
2635 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636 eeprom_write_reg(priv, EEPROM_BIT_CS);
2637 }
2638
2639 /* release (de-assert) the chip select */
2640 static void eeprom_disable_cs(struct ipw_priv *priv)
2641 {
2642 eeprom_write_reg(priv, EEPROM_BIT_CS);
2643 eeprom_write_reg(priv, 0);
2644 eeprom_write_reg(priv, EEPROM_BIT_SK);
2645 }
2646
2647 /* push a single bit down to the eeprom */
2648 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2649 {
2650 int d = (bit ? EEPROM_BIT_DI : 0);
2651 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2652 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2653 }
2654
2655 /* push an opcode followed by an address down to the eeprom */
2656 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2657 {
2658 int i;
2659
2660 eeprom_cs(priv);
2661 eeprom_write_bit(priv, 1);
2662 eeprom_write_bit(priv, op & 2);
2663 eeprom_write_bit(priv, op & 1);
2664 for (i = 7; i >= 0; i--) {
2665 eeprom_write_bit(priv, addr & (1 << i));
2666 }
2667 }
2668
2669 /* pull 16 bits off the eeprom, one bit at a time */
2670 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2671 {
2672 int i;
2673 u16 r = 0;
2674
2675 /* Send READ Opcode */
2676 eeprom_op(priv, EEPROM_CMD_READ, addr);
2677
2678 /* Send dummy bit */
2679 eeprom_write_reg(priv, EEPROM_BIT_CS);
2680
2681         /* Read the 16-bit word off the eeprom one bit at a time */
2682 for (i = 0; i < 16; i++) {
2683 u32 data = 0;
2684 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2685 eeprom_write_reg(priv, EEPROM_BIT_CS);
2686 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2687 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2688 }
2689
2690 /* Send another dummy bit */
2691 eeprom_write_reg(priv, 0);
2692 eeprom_disable_cs(priv);
2693
2694 return r;
2695 }
2696
2697 /* helper function for pulling the mac address out of the private */
2698 /* data's copy of the eeprom data */
2699 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700 {
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2702 }
2703
2704 /*
2705 * Either the device driver (i.e. the host) or the firmware can
2706 * load eeprom data into the designated region in SRAM. If neither
2707  * happens then the FW will shut down with a fatal error.
2708  *
2709  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2710  * region of shared SRAM needs to be set to a non-zero value.
2711 */
2712 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2713 {
2714 int i;
2715 __le16 *eeprom = (__le16 *) priv->eeprom;
2716
2717 IPW_DEBUG_TRACE(">>\n");
2718
2719 /* read entire contents of eeprom into private buffer */
2720 for (i = 0; i < 128; i++)
2721 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2722
2723 /*
2724 If the data looks correct, then copy it to our private
2725 copy. Otherwise let the firmware know to perform the operation
2726 on its own.
2727 */
2728 if (priv->eeprom[EEPROM_VERSION] != 0) {
2729 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2730
2731 /* write the eeprom data to sram */
2732 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2733 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2734
2735 /* Do not load eeprom data on fatal error or suspend */
2736 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2737 } else {
2738                 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2739
2740 /* Load eeprom data on fatal error or suspend */
2741 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2742 }
2743
2744 IPW_DEBUG_TRACE("<<\n");
2745 }
2746
2747 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2748 {
2749 count >>= 2;
2750 if (!count)
2751 return;
2752 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2753 while (count--)
2754 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2755 }
2756
2757 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2758 {
2759 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2760 CB_NUMBER_OF_ELEMENTS_SMALL *
2761 sizeof(struct command_block));
2762 }
2763
2764 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2765 { /* start dma engine but no transfers yet */
2766
2767 IPW_DEBUG_FW(">> :\n");
2768
2769 /* Start the dma */
2770 ipw_fw_dma_reset_command_blocks(priv);
2771
2772 /* Write CB base address */
2773 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2774
2775 IPW_DEBUG_FW("<< :\n");
2776 return 0;
2777 }
2778
2779 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2780 {
2781 u32 control = 0;
2782
2783 IPW_DEBUG_FW(">> :\n");
2784
2785 /* set the Stop and Abort bit */
2786 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2787 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2788 priv->sram_desc.last_cb_index = 0;
2789
2790 IPW_DEBUG_FW("<<\n");
2791 }
2792
2793 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2794 struct command_block *cb)
2795 {
2796 u32 address =
2797 IPW_SHARED_SRAM_DMA_CONTROL +
2798 (sizeof(struct command_block) * index);
2799 IPW_DEBUG_FW(">> :\n");
2800
2801 ipw_write_indirect(priv, address, (u8 *) cb,
2802 (int)sizeof(struct command_block));
2803
2804 IPW_DEBUG_FW("<< :\n");
2805 return 0;
2806
2807 }
2808
2809 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2810 {
2811 u32 control = 0;
2812 u32 index = 0;
2813
2814 IPW_DEBUG_FW(">> :\n");
2815
2816 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2817 ipw_fw_dma_write_command_block(priv, index,
2818 &priv->sram_desc.cb_list[index]);
2819
2820 /* Enable the DMA in the CSR register */
2821 ipw_clear_bit(priv, IPW_RESET_REG,
2822 IPW_RESET_REG_MASTER_DISABLED |
2823 IPW_RESET_REG_STOP_MASTER);
2824
2825 /* Set the Start bit. */
2826 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2827 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2828
2829 IPW_DEBUG_FW("<< :\n");
2830 return 0;
2831 }
2832
2833 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2834 {
2835 u32 address;
2836 u32 register_value = 0;
2837 u32 cb_fields_address = 0;
2838
2839 IPW_DEBUG_FW(">> :\n");
2840 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2841 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2842
2843         /* Read the DMA Control register */
2844 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2845 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2846
2847 /* Print the CB values */
2848 cb_fields_address = address;
2849 register_value = ipw_read_reg32(priv, cb_fields_address);
2850 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2851
2852 cb_fields_address += sizeof(u32);
2853 register_value = ipw_read_reg32(priv, cb_fields_address);
2854 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2855
2856 cb_fields_address += sizeof(u32);
2857 register_value = ipw_read_reg32(priv, cb_fields_address);
2858 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2859 register_value);
2860
2861 cb_fields_address += sizeof(u32);
2862 register_value = ipw_read_reg32(priv, cb_fields_address);
2863 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2864
2865         IPW_DEBUG_FW("<< :\n");
2866 }
2867
2868 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2869 {
2870 u32 current_cb_address = 0;
2871 u32 current_cb_index = 0;
2872
2873         IPW_DEBUG_FW(">> :\n");
2874 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2875
2876 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2877 sizeof(struct command_block);
2878
2879 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2880 current_cb_index, current_cb_address);
2881
2882         IPW_DEBUG_FW("<< :\n");
2883 return current_cb_index;
2884
2885 }
2886
2887 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2888 u32 src_address,
2889 u32 dest_address,
2890 u32 length,
2891 int interrupt_enabled, int is_last)
2892 {
2893
2894 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2895 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2896 CB_DEST_SIZE_LONG;
2897 struct command_block *cb;
2898 u32 last_cb_element = 0;
2899
2900 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2901 src_address, dest_address, length);
2902
2903 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2904 return -1;
2905
2906 last_cb_element = priv->sram_desc.last_cb_index;
2907 cb = &priv->sram_desc.cb_list[last_cb_element];
2908 priv->sram_desc.last_cb_index++;
2909
2910 /* Calculate the new CB control word */
2911 if (interrupt_enabled)
2912 control |= CB_INT_ENABLED;
2913
2914 if (is_last)
2915 control |= CB_LAST_VALID;
2916
2917 control |= length;
2918
2919 /* Calculate the CB Element's checksum value */
2920 cb->status = control ^ src_address ^ dest_address;
2921
2922 /* Copy the Source and Destination addresses */
2923 cb->dest_addr = dest_address;
2924 cb->source_addr = src_address;
2925
2926 /* Copy the Control Word last */
2927 cb->control = control;
2928
2929 return 0;
2930 }
2931
2932 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2933 int nr, u32 dest_address, u32 len)
2934 {
2935 int ret, i;
2936 u32 size;
2937
2938 IPW_DEBUG_FW(">>\n");
2939 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2940 nr, dest_address, len);
2941
2942 for (i = 0; i < nr; i++) {
2943 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2944 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2945 dest_address +
2946 i * CB_MAX_LENGTH, size,
2947 0, 0);
2948 if (ret) {
2949 IPW_DEBUG_FW_INFO(": Failed\n");
2950 return -1;
2951 } else
2952 IPW_DEBUG_FW_INFO(": Added new cb\n");
2953 }
2954
2955 IPW_DEBUG_FW("<<\n");
2956 return 0;
2957 }
2958
2959 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2960 {
2961 u32 current_index = 0, previous_index;
2962 u32 watchdog = 0;
2963
2964 IPW_DEBUG_FW(">> :\n");
2965
2966 current_index = ipw_fw_dma_command_block_index(priv);
2967 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2968 (int)priv->sram_desc.last_cb_index);
2969
2970 while (current_index < priv->sram_desc.last_cb_index) {
2971 udelay(50);
2972 previous_index = current_index;
2973 current_index = ipw_fw_dma_command_block_index(priv);
2974
2975 if (previous_index < current_index) {
2976 watchdog = 0;
2977 continue;
2978 }
2979 if (++watchdog > 400) {
2980 IPW_DEBUG_FW_INFO("Timeout\n");
2981 ipw_fw_dma_dump_command_block(priv);
2982 ipw_fw_dma_abort(priv);
2983 return -1;
2984 }
2985 }
2986
2987 ipw_fw_dma_abort(priv);
2988
2989         /* Disable the DMA in the CSR register */
2990 ipw_set_bit(priv, IPW_RESET_REG,
2991 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2992
2993 IPW_DEBUG_FW("<< dmaWaitSync\n");
2994 return 0;
2995 }
2996
2997 static void ipw_remove_current_network(struct ipw_priv *priv)
2998 {
2999 struct list_head *element, *safe;
3000 struct libipw_network *network = NULL;
3001 unsigned long flags;
3002
3003 spin_lock_irqsave(&priv->ieee->lock, flags);
3004 list_for_each_safe(element, safe, &priv->ieee->network_list) {
3005 network = list_entry(element, struct libipw_network, list);
3006 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
3007 list_del(element);
3008 list_add_tail(&network->list,
3009 &priv->ieee->network_free_list);
3010 }
3011 }
3012 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3013 }
3014
3015 /**
3016 * Check that card is still alive.
3017 * Reads debug register from domain0.
3018 * If card is present, pre-defined value should
3019 * be found there.
3020 *
3021 * @param priv
3022 * @return 1 if card is present, 0 otherwise
3023 */
3024 static inline int ipw_alive(struct ipw_priv *priv)
3025 {
3026 return ipw_read32(priv, 0x90) == 0xd55555d5;
3027 }
3028
3029 /* timeout in msec, attempted in 10-msec quanta */
3030 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3031 int timeout)
3032 {
3033 int i = 0;
3034
3035 do {
3036 if ((ipw_read32(priv, addr) & mask) == mask)
3037 return i;
3038 mdelay(10);
3039 i += 10;
3040 } while (i < timeout);
3041
3042 return -ETIME;
3043 }
3044
3045 /* These functions load the firmware and microcode needed to operate the
3046  * ipw hardware.  They assume the buffer holds the complete image and that
3047  * the caller handles the memory allocation and clean up.
3048 */
3049
3050 static int ipw_stop_master(struct ipw_priv *priv)
3051 {
3052 int rc;
3053
3054 IPW_DEBUG_TRACE(">>\n");
3055 /* stop master. typical delay - 0 */
3056 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3057
3058 /* timeout is in msec, polled in 10-msec quanta */
3059 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3060 IPW_RESET_REG_MASTER_DISABLED, 100);
3061 if (rc < 0) {
3062 IPW_ERROR("wait for stop master failed after 100ms\n");
3063 return -1;
3064 }
3065
3066 IPW_DEBUG_INFO("stop master %dms\n", rc);
3067
3068 return rc;
3069 }
3070
3071 static void ipw_arc_release(struct ipw_priv *priv)
3072 {
3073 IPW_DEBUG_TRACE(">>\n");
3074 mdelay(5);
3075
3076 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3077
3078 /* no one knows timing, for safety add some delay */
3079 mdelay(5);
3080 }
3081
3082 struct fw_chunk {
3083 __le32 address;
3084 __le32 length;
3085 };
3086
3087 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3088 {
3089 int rc = 0, i, addr;
3090 u8 cr = 0;
3091 __le16 *image;
3092
3093 image = (__le16 *) data;
3094
3095 IPW_DEBUG_TRACE(">>\n");
3096
3097 rc = ipw_stop_master(priv);
3098
3099 if (rc < 0)
3100 return rc;
3101
3102 for (addr = IPW_SHARED_LOWER_BOUND;
3103 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3104 ipw_write32(priv, addr, 0);
3105 }
3106
3107 /* no ucode (yet) */
3108 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3109 /* destroy DMA queues */
3110 /* reset sequence */
3111
3112 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3113 ipw_arc_release(priv);
3114 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3115 mdelay(1);
3116
3117 /* reset PHY */
3118 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3119 mdelay(1);
3120
3121 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3122 mdelay(1);
3123
3124 /* enable ucode store */
3125 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3126 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3127 mdelay(1);
3128
3129 /* write ucode */
3130 /**
3131 * @bug
3132 * Do NOT set indirect address register once and then
3133 * store data to indirect data register in the loop.
3134  * It seems very reasonable, but in this case DINO does not
3135  * accept the ucode.  It is essential to set the address each time.
3136 */
3137 /* load new ipw uCode */
3138 for (i = 0; i < len / 2; i++)
3139 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3140 le16_to_cpu(image[i]));
3141
3142 /* enable DINO */
3143 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3144 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3145
3146         /* this is where the igx / win driver deviates from the VAP driver. */
3147
3148 /* wait for alive response */
3149 for (i = 0; i < 100; i++) {
3150 /* poll for incoming data */
3151 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3152 if (cr & DINO_RXFIFO_DATA)
3153 break;
3154 mdelay(1);
3155 }
3156
3157 if (cr & DINO_RXFIFO_DATA) {
3158                 /* alive_command_response size is NOT a multiple of 4 */
3159 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3160
3161 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3162 response_buffer[i] =
3163 cpu_to_le32(ipw_read_reg32(priv,
3164 IPW_BASEBAND_RX_FIFO_READ));
3165 memcpy(&priv->dino_alive, response_buffer,
3166 sizeof(priv->dino_alive));
3167 if (priv->dino_alive.alive_command == 1
3168 && priv->dino_alive.ucode_valid == 1) {
3169 rc = 0;
3170 IPW_DEBUG_INFO
3171 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3172 "of %02d/%02d/%02d %02d:%02d\n",
3173 priv->dino_alive.software_revision,
3174 priv->dino_alive.software_revision,
3175 priv->dino_alive.device_identifier,
3176 priv->dino_alive.device_identifier,
3177 priv->dino_alive.time_stamp[0],
3178 priv->dino_alive.time_stamp[1],
3179 priv->dino_alive.time_stamp[2],
3180 priv->dino_alive.time_stamp[3],
3181 priv->dino_alive.time_stamp[4]);
3182 } else {
3183 IPW_DEBUG_INFO("Microcode is not alive\n");
3184 rc = -EINVAL;
3185 }
3186 } else {
3187 IPW_DEBUG_INFO("No alive response from DINO\n");
3188 rc = -ETIME;
3189 }
3190
3191         /* disable DINO, otherwise for some reason
3192            the firmware has problems getting the alive response. */
3193 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3194
3195 return rc;
3196 }
3197
3198 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3199 {
3200 int ret = -1;
3201 int offset = 0;
3202 struct fw_chunk *chunk;
3203 int total_nr = 0;
3204 int i;
3205 struct pci_pool *pool;
3206 void **virts;
3207 dma_addr_t *phys;
3208
3209         IPW_DEBUG_TRACE(">> :\n");
3210
3211 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3212 GFP_KERNEL);
3213 if (!virts)
3214 return -ENOMEM;
3215
3216 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3217 GFP_KERNEL);
3218 if (!phys) {
3219 kfree(virts);
3220 return -ENOMEM;
3221 }
3222 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3223 if (!pool) {
3224 IPW_ERROR("pci_pool_create failed\n");
3225 kfree(phys);
3226 kfree(virts);
3227 return -ENOMEM;
3228 }
3229
3230 /* Start the Dma */
3231 ret = ipw_fw_dma_enable(priv);
3232
3233         /* if the DMA is already in use, this would be a bug. */
3234 BUG_ON(priv->sram_desc.last_cb_index > 0);
3235
3236 do {
3237 u32 chunk_len;
3238 u8 *start;
3239 int size;
3240 int nr = 0;
3241
3242 chunk = (struct fw_chunk *)(data + offset);
3243 offset += sizeof(struct fw_chunk);
3244 chunk_len = le32_to_cpu(chunk->length);
3245 start = data + offset;
3246
3247 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3248 for (i = 0; i < nr; i++) {
3249 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3250 &phys[total_nr]);
3251 if (!virts[total_nr]) {
3252 ret = -ENOMEM;
3253 goto out;
3254 }
3255 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3256 CB_MAX_LENGTH);
3257 memcpy(virts[total_nr], start, size);
3258 start += size;
3259 total_nr++;
3260 /* We don't support fw chunk larger than 64*8K */
3261 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3262 }
3263
3264 /* build DMA packet and queue up for sending */
3265                 /* dma to chunk->address, the chunk->length bytes from data +
3266                  * offset */
3267 /* Dma loading */
3268 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3269 nr, le32_to_cpu(chunk->address),
3270 chunk_len);
3271 if (ret) {
3272 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3273 goto out;
3274 }
3275
3276 offset += chunk_len;
3277 } while (offset < len);
3278
3279 /* Run the DMA and wait for the answer */
3280 ret = ipw_fw_dma_kick(priv);
3281 if (ret) {
3282 IPW_ERROR("dmaKick Failed\n");
3283 goto out;
3284 }
3285
3286 ret = ipw_fw_dma_wait(priv);
3287 if (ret) {
3288 IPW_ERROR("dmaWaitSync Failed\n");
3289 goto out;
3290 }
3291 out:
3292 for (i = 0; i < total_nr; i++)
3293 pci_pool_free(pool, virts[i], phys[i]);
3294
3295 pci_pool_destroy(pool);
3296 kfree(phys);
3297 kfree(virts);
3298
3299 return ret;
3300 }
3301
3302 /* stop nic */
3303 static int ipw_stop_nic(struct ipw_priv *priv)
3304 {
3305 int rc = 0;
3306
3307 /* stop */
3308 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3309
3310 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3311 IPW_RESET_REG_MASTER_DISABLED, 500);
3312 if (rc < 0) {
3313 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3314 return rc;
3315 }
3316
3317 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3318
3319 return rc;
3320 }
3321
3322 static void ipw_start_nic(struct ipw_priv *priv)
3323 {
3324 IPW_DEBUG_TRACE(">>\n");
3325
3326 /* prvHwStartNic release ARC */
3327 ipw_clear_bit(priv, IPW_RESET_REG,
3328 IPW_RESET_REG_MASTER_DISABLED |
3329 IPW_RESET_REG_STOP_MASTER |
3330 CBD_RESET_REG_PRINCETON_RESET);
3331
3332 /* enable power management */
3333 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3334 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3335
3336 IPW_DEBUG_TRACE("<<\n");
3337 }
3338
3339 static int ipw_init_nic(struct ipw_priv *priv)
3340 {
3341 int rc;
3342
3343 IPW_DEBUG_TRACE(">>\n");
3344 /* reset */
3345         /* prvHwInitNic */
3346 /* set "initialization complete" bit to move adapter to D0 state */
3347 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3348
3349 /* low-level PLL activation */
3350 ipw_write32(priv, IPW_READ_INT_REGISTER,
3351 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3352
3353 /* wait for clock stabilization */
3354 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3355 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3356 if (rc < 0)
3357                 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3358
3359 /* assert SW reset */
3360 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3361
3362 udelay(10);
3363
3364 /* set "initialization complete" bit to move adapter to D0 state */
3365 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3366
3367         IPW_DEBUG_TRACE("<<\n");
3368 return 0;
3369 }
3370
3371 /* Call this function from process context, it will sleep in request_firmware.
3372 * Probe is an ok place to call this from.
3373 */
3374 static int ipw_reset_nic(struct ipw_priv *priv)
3375 {
3376 int rc = 0;
3377 unsigned long flags;
3378
3379 IPW_DEBUG_TRACE(">>\n");
3380
3381 rc = ipw_init_nic(priv);
3382
3383 spin_lock_irqsave(&priv->lock, flags);
3384 /* Clear the 'host command active' bit... */
3385 priv->status &= ~STATUS_HCMD_ACTIVE;
3386 wake_up_interruptible(&priv->wait_command_queue);
3387 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3388 wake_up_interruptible(&priv->wait_state);
3389 spin_unlock_irqrestore(&priv->lock, flags);
3390
3391 IPW_DEBUG_TRACE("<<\n");
3392 return rc;
3393 }
3394
3395
3396 struct ipw_fw {
3397 __le32 ver;
3398 __le32 boot_size;
3399 __le32 ucode_size;
3400 __le32 fw_size;
3401 u8 data[0];
3402 };
3403
3404 static int ipw_get_fw(struct ipw_priv *priv,
3405 const struct firmware **raw, const char *name)
3406 {
3407 struct ipw_fw *fw;
3408 int rc;
3409
3410 /* ask firmware_class module to get the boot firmware off disk */
3411 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3412 if (rc < 0) {
3413 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3414 return rc;
3415 }
3416
3417 if ((*raw)->size < sizeof(*fw)) {
3418 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3419 return -EINVAL;
3420 }
3421
3422 fw = (void *)(*raw)->data;
3423
3424 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3425 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3426 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3427 name, (*raw)->size);
3428 return -EINVAL;
3429 }
3430
3431 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3432 name,
3433 le32_to_cpu(fw->ver) >> 16,
3434 le32_to_cpu(fw->ver) & 0xff,
3435 (*raw)->size - sizeof(*fw));
3436 return 0;
3437 }
3438
3439 #define IPW_RX_BUF_SIZE (3000)
3440
3441 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3442 struct ipw_rx_queue *rxq)
3443 {
3444 unsigned long flags;
3445 int i;
3446
3447 spin_lock_irqsave(&rxq->lock, flags);
3448
3449 INIT_LIST_HEAD(&rxq->rx_free);
3450 INIT_LIST_HEAD(&rxq->rx_used);
3451
3452 /* Fill the rx_used queue with _all_ of the Rx buffers */
3453 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3454 /* In the reset function, these buffers may have been allocated
3455 * to an SKB, so we need to unmap and free potential storage */
3456 if (rxq->pool[i].skb != NULL) {
3457 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3458 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3459 dev_kfree_skb(rxq->pool[i].skb);
3460 rxq->pool[i].skb = NULL;
3461 }
3462 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3463 }
3464
3465 /* Set us so that we have processed and used all buffers, but have
3466 * not restocked the Rx queue with fresh buffers */
3467 rxq->read = rxq->write = 0;
3468 rxq->free_count = 0;
3469 spin_unlock_irqrestore(&rxq->lock, flags);
3470 }
3471
3472 #ifdef CONFIG_PM
3473 static int fw_loaded = 0;
3474 static const struct firmware *raw = NULL;
3475
3476 static void free_firmware(void)
3477 {
3478 if (fw_loaded) {
3479 release_firmware(raw);
3480 raw = NULL;
3481 fw_loaded = 0;
3482 }
3483 }
3484 #else
3485 #define free_firmware() do {} while (0)
3486 #endif
3487
3488 static int ipw_load(struct ipw_priv *priv)
3489 {
3490 #ifndef CONFIG_PM
3491 const struct firmware *raw = NULL;
3492 #endif
3493 struct ipw_fw *fw;
3494 u8 *boot_img, *ucode_img, *fw_img;
3495 u8 *name = NULL;
3496 int rc = 0, retries = 3;
3497
3498 switch (priv->ieee->iw_mode) {
3499 case IW_MODE_ADHOC:
3500 name = "ipw2200-ibss.fw";
3501 break;
3502 #ifdef CONFIG_IPW2200_MONITOR
3503 case IW_MODE_MONITOR:
3504 name = "ipw2200-sniffer.fw";
3505 break;
3506 #endif
3507 case IW_MODE_INFRA:
3508 name = "ipw2200-bss.fw";
3509 break;
3510 }
3511
3512 if (!name) {
3513 rc = -EINVAL;
3514 goto error;
3515 }
3516
3517 #ifdef CONFIG_PM
3518 if (!fw_loaded) {
3519 #endif
3520 rc = ipw_get_fw(priv, &raw, name);
3521 if (rc < 0)
3522 goto error;
3523 #ifdef CONFIG_PM
3524 }
3525 #endif
3526
3527 fw = (void *)raw->data;
3528 boot_img = &fw->data[0];
3529 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3530 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3531 le32_to_cpu(fw->ucode_size)];
3532
3533 if (rc < 0)
3534 goto error;
3535
3536 if (!priv->rxq)
3537 priv->rxq = ipw_rx_queue_alloc(priv);
3538 else
3539 ipw_rx_queue_reset(priv, priv->rxq);
3540 if (!priv->rxq) {
3541 IPW_ERROR("Unable to initialize Rx queue\n");
3542 goto error;
3543 }
3544
3545 retry:
3546 /* Ensure interrupts are disabled */
3547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3548 priv->status &= ~STATUS_INT_ENABLED;
3549
3550 /* ack pending interrupts */
3551 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3552
3553 ipw_stop_nic(priv);
3554
3555 rc = ipw_reset_nic(priv);
3556 if (rc < 0) {
3557 IPW_ERROR("Unable to reset NIC\n");
3558 goto error;
3559 }
3560
3561 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3562 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3563
3564 /* DMA the initial boot firmware into the device */
3565 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3566 if (rc < 0) {
3567 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3568 goto error;
3569 }
3570
3571 /* kick start the device */
3572 ipw_start_nic(priv);
3573
3574 /* wait for the device to finish its initial startup sequence */
3575 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3576 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3577 if (rc < 0) {
3578 IPW_ERROR("device failed to boot initial fw image\n");
3579 goto error;
3580 }
3581 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3582
3583 /* ack fw init done interrupt */
3584 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3585
3586 /* DMA the ucode into the device */
3587 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3588 if (rc < 0) {
3589 IPW_ERROR("Unable to load ucode: %d\n", rc);
3590 goto error;
3591 }
3592
3593 /* stop nic */
3594 ipw_stop_nic(priv);
3595
3596 /* DMA bss firmware into the device */
3597 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3598 if (rc < 0) {
3599 IPW_ERROR("Unable to load firmware: %d\n", rc);
3600 goto error;
3601 }
3602 #ifdef CONFIG_PM
3603 fw_loaded = 1;
3604 #endif
3605
3606 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3607
3608 rc = ipw_queue_reset(priv);
3609 if (rc < 0) {
3610 IPW_ERROR("Unable to initialize queues\n");
3611 goto error;
3612 }
3613
3614 /* Ensure interrupts are disabled */
3615 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3616 /* ack pending interrupts */
3617 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3618
3619 /* kick start the device */
3620 ipw_start_nic(priv);
3621
3622 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3623 if (retries > 0) {
3624 IPW_WARNING("Parity error. Retrying init.\n");
3625 retries--;
3626 goto retry;
3627 }
3628
3629 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3630 rc = -EIO;
3631 goto error;
3632 }
3633
3634 /* wait for the device */
3635 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3636 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3637 if (rc < 0) {
3638 IPW_ERROR("device failed to start within 500ms\n");
3639 goto error;
3640 }
3641 IPW_DEBUG_INFO("device response after %dms\n", rc);
3642
3643 /* ack fw init done interrupt */
3644 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3645
3646 /* read eeprom data and initialize the eeprom region of sram */
3647 priv->eeprom_delay = 1;
3648 ipw_eeprom_init_sram(priv);
3649
3650 /* enable interrupts */
3651 ipw_enable_interrupts(priv);
3652
3653 /* Ensure our queue has valid packets */
3654 ipw_rx_queue_replenish(priv);
3655
3656 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3657
3658 /* ack pending interrupts */
3659 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3660
3661 #ifndef CONFIG_PM
3662 release_firmware(raw);
3663 #endif
3664 return 0;
3665
3666 error:
3667 if (priv->rxq) {
3668 ipw_rx_queue_free(priv, priv->rxq);
3669 priv->rxq = NULL;
3670 }
3671 ipw_tx_queue_free(priv);
3672 release_firmware(raw);
3673 #ifdef CONFIG_PM
3674 fw_loaded = 0;
3675 raw = NULL;
3676 #endif
3677
3678 return rc;
3679 }
3680
3681 /**
3682 * DMA services
3683 *
3684 * Theory of operation
3685 *
3686  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3687  * Two empty entries are always kept in the buffer to protect against overflow.
3688  *
3689  * For the Tx queues, there are low mark and high mark limits.  If, after
3690  * queuing a packet for Tx, the free space drops below the low mark, the Tx
3691  * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
3692  * free space rises above the high mark, the Tx queue is resumed.
3693 *
3694 * The IPW operates with six queues, one receive queue in the device's
3695 * sram, one transmit queue for sending commands to the device firmware,
3696 * and four transmit queues for data.
3697 *
3698 * The four transmit queues allow for performing quality of service (qos)
3699 * transmissions as per the 802.11 protocol. Currently Linux does not
3700 * provide a mechanism to the user for utilizing prioritized queues, so
3701 * we only utilize the first data transmit queue (queue1).
3702 */
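/* A worked example of the marks described above (the numbers are purely
 * illustrative): for a queue initialized with count = 64 BDs, ipw_queue_init()
 * sets low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8.  In
 * ipw_tx_queue_space(), with last_used = 4 and first_empty = 10, the result is
 * 4 - 10 = -6, plus n_bd = 58, minus the 2 reserved entries = 56 free slots.
 */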
3703
3704 /**
3705 * Driver allocates buffers of this size for Rx
3706 */
3707
3708 /**
3709 * ipw_rx_queue_space - Return number of free slots available in queue.
3710 */
3711 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3712 {
3713 int s = q->read - q->write;
3714 if (s <= 0)
3715 s += RX_QUEUE_SIZE;
3716 /* keep some buffer to not confuse full and empty queue */
3717 s -= 2;
3718 if (s < 0)
3719 s = 0;
3720 return s;
3721 }
3722
3723 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3724 {
3725 int s = q->last_used - q->first_empty;
3726 if (s <= 0)
3727 s += q->n_bd;
3728 s -= 2; /* keep some reserve to not confuse empty and full situations */
3729 if (s < 0)
3730 s = 0;
3731 return s;
3732 }
3733
3734 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3735 {
3736 return (++index == n_bd) ? 0 : index;
3737 }
3738
3739 /**
3740 * Initialize common DMA queue structure
3741 *
3742 * @param q queue to init
3743  * @param count Number of BD's to allocate. Should be a power of 2
3744 * @param read_register Address for 'read' register
3745 * (not offset within BAR, full address)
3746 * @param write_register Address for 'write' register
3747 * (not offset within BAR, full address)
3748 * @param base_register Address for 'base' register
3749 * (not offset within BAR, full address)
3750 * @param size Address for 'size' register
3751 * (not offset within BAR, full address)
3752 */
3753 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3754 int count, u32 read, u32 write, u32 base, u32 size)
3755 {
3756 q->n_bd = count;
3757
3758 q->low_mark = q->n_bd / 4;
3759 if (q->low_mark < 4)
3760 q->low_mark = 4;
3761
3762 q->high_mark = q->n_bd / 8;
3763 if (q->high_mark < 2)
3764 q->high_mark = 2;
3765
3766 q->first_empty = q->last_used = 0;
3767 q->reg_r = read;
3768 q->reg_w = write;
3769
3770 ipw_write32(priv, base, q->dma_addr);
3771 ipw_write32(priv, size, count);
3772 ipw_write32(priv, read, 0);
3773 ipw_write32(priv, write, 0);
3774
3775 _ipw_read32(priv, 0x90);
3776 }
3777
3778 static int ipw_queue_tx_init(struct ipw_priv *priv,
3779 struct clx2_tx_queue *q,
3780 int count, u32 read, u32 write, u32 base, u32 size)
3781 {
3782 struct pci_dev *dev = priv->pci_dev;
3783
3784 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3785 if (!q->txb) {
3786                 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3787 return -ENOMEM;
3788 }
3789
3790 q->bd =
3791 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3792 if (!q->bd) {
3793 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3794 sizeof(q->bd[0]) * count);
3795 kfree(q->txb);
3796 q->txb = NULL;
3797 return -ENOMEM;
3798 }
3799
3800 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3801 return 0;
3802 }
3803
3804 /**
3805  * Free one TFD, the one at index [txq->q.last_used].
3806 * Do NOT advance any indexes
3807 *
3808 * @param dev
3809 * @param txq
3810 */
3811 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3812 struct clx2_tx_queue *txq)
3813 {
3814 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3815 struct pci_dev *dev = priv->pci_dev;
3816 int i;
3817
3818 /* classify bd */
3819 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3820                 /* nothing to clean up for host commands */
3821 return;
3822
3823 /* sanity check */
3824 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3825 IPW_ERROR("Too many chunks: %i\n",
3826 le32_to_cpu(bd->u.data.num_chunks));
3827                 /** @todo issue fatal error, it is quite a serious situation */
3828 return;
3829 }
3830
3831 /* unmap chunks if any */
3832 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3833 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3834 le16_to_cpu(bd->u.data.chunk_len[i]),
3835 PCI_DMA_TODEVICE);
3836 if (txq->txb[txq->q.last_used]) {
3837 libipw_txb_free(txq->txb[txq->q.last_used]);
3838 txq->txb[txq->q.last_used] = NULL;
3839 }
3840 }
3841 }
3842
3843 /**
3844 * Deallocate DMA queue.
3845 *
3846 * Empty queue by removing and destroying all BD's.
3847 * Free all buffers.
3848 *
3849 * @param dev
3850 * @param q
3851 */
3852 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3853 {
3854 struct clx2_queue *q = &txq->q;
3855 struct pci_dev *dev = priv->pci_dev;
3856
3857 if (q->n_bd == 0)
3858 return;
3859
3860 /* first, empty all BD's */
3861 for (; q->first_empty != q->last_used;
3862 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3863 ipw_queue_tx_free_tfd(priv, txq);
3864 }
3865
3866 /* free buffers belonging to queue itself */
3867 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3868 q->dma_addr);
3869 kfree(txq->txb);
3870
3871 /* 0 fill whole structure */
3872 memset(txq, 0, sizeof(*txq));
3873 }
3874
3875 /**
3876 * Destroy all DMA queues and structures
3877 *
3878 * @param priv
3879 */
3880 static void ipw_tx_queue_free(struct ipw_priv *priv)
3881 {
3882 /* Tx CMD queue */
3883 ipw_queue_tx_free(priv, &priv->txq_cmd);
3884
3885 /* Tx queues */
3886 ipw_queue_tx_free(priv, &priv->txq[0]);
3887 ipw_queue_tx_free(priv, &priv->txq[1]);
3888 ipw_queue_tx_free(priv, &priv->txq[2]);
3889 ipw_queue_tx_free(priv, &priv->txq[3]);
3890 }
3891
3892 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3893 {
3894 /* First 3 bytes are manufacturer */
3895 bssid[0] = priv->mac_addr[0];
3896 bssid[1] = priv->mac_addr[1];
3897 bssid[2] = priv->mac_addr[2];
3898
3899 /* Last bytes are random */
3900 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3901
3902 bssid[0] &= 0xfe; /* clear multicast bit */
3903 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3904 }
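/*
 * Example of the bit twiddling above (illustrative MAC address): if the
 * adapter's OUI bytes are 00:0e:35, the generated ad-hoc BSSID starts
 * with (0x00 & 0xfe) | 0x02 = 0x02, i.e. 02:0e:35:xx:xx:xx -- the
 * multicast bit (bit 0) cleared and the locally administered bit
 * (bit 1) set, with the remaining three bytes random.
 */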
3905
3906 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3907 {
3908 struct ipw_station_entry entry;
3909 int i;
3910
3911 for (i = 0; i < priv->num_stations; i++) {
3912 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3913 /* Another node is active in network */
3914 priv->missed_adhoc_beacons = 0;
3915 if (!(priv->config & CFG_STATIC_CHANNEL))
3916 /* when other nodes drop out, we drop out */
3917 priv->config &= ~CFG_ADHOC_PERSIST;
3918
3919 return i;
3920 }
3921 }
3922
3923 if (i == MAX_STATIONS)
3924 return IPW_INVALID_STATION;
3925
3926 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3927
3928 entry.reserved = 0;
3929 entry.support_mode = 0;
3930 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3931 memcpy(priv->stations[i], bssid, ETH_ALEN);
3932 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3933 &entry, sizeof(entry));
3934 priv->num_stations++;
3935
3936 return i;
3937 }
3938
3939 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3940 {
3941 int i;
3942
3943 for (i = 0; i < priv->num_stations; i++)
3944 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3945 return i;
3946
3947 return IPW_INVALID_STATION;
3948 }
3949
3950 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3951 {
3952 int err;
3953
3954 if (priv->status & STATUS_ASSOCIATING) {
3955 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3956 schedule_work(&priv->disassociate);
3957 return;
3958 }
3959
3960 if (!(priv->status & STATUS_ASSOCIATED)) {
3961 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3962 return;
3963 }
3964
3965 IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3966 "on channel %d.\n",
3967 priv->assoc_request.bssid,
3968 priv->assoc_request.channel);
3969
3970 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3971 priv->status |= STATUS_DISASSOCIATING;
3972
3973 if (quiet)
3974 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3975 else
3976 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3977
3978 err = ipw_send_associate(priv, &priv->assoc_request);
3979 if (err) {
3980 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3981 "failed.\n");
3982 return;
3983 }
3984
3985 }
3986
3987 static int ipw_disassociate(void *data)
3988 {
3989 struct ipw_priv *priv = data;
3990 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3991 return 0;
3992 ipw_send_disassociate(data, 0);
3993 netif_carrier_off(priv->net_dev);
3994 return 1;
3995 }
3996
3997 static void ipw_bg_disassociate(struct work_struct *work)
3998 {
3999 struct ipw_priv *priv =
4000 container_of(work, struct ipw_priv, disassociate);
4001 mutex_lock(&priv->mutex);
4002 ipw_disassociate(priv);
4003 mutex_unlock(&priv->mutex);
4004 }
4005
4006 static void ipw_system_config(struct work_struct *work)
4007 {
4008 struct ipw_priv *priv =
4009 container_of(work, struct ipw_priv, system_config);
4010
4011 #ifdef CONFIG_IPW2200_PROMISCUOUS
4012 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4013 priv->sys_config.accept_all_data_frames = 1;
4014 priv->sys_config.accept_non_directed_frames = 1;
4015 priv->sys_config.accept_all_mgmt_bcpr = 1;
4016 priv->sys_config.accept_all_mgmt_frames = 1;
4017 }
4018 #endif
4019
4020 ipw_send_system_config(priv);
4021 }
4022
4023 struct ipw_status_code {
4024 u16 status;
4025 const char *reason;
4026 };
4027
4028 static const struct ipw_status_code ipw_status_codes[] = {
4029 {0x00, "Successful"},
4030 {0x01, "Unspecified failure"},
4031 {0x0A, "Cannot support all requested capabilities in the "
4032 "Capability information field"},
4033 {0x0B, "Reassociation denied due to inability to confirm that "
4034 "association exists"},
4035 {0x0C, "Association denied due to reason outside the scope of this "
4036 "standard"},
4037 {0x0D,
4038 "Responding station does not support the specified authentication "
4039 "algorithm"},
4040 {0x0E,
4041 "Received an Authentication frame with authentication sequence "
4042 "transaction sequence number out of expected sequence"},
4043 {0x0F, "Authentication rejected because of challenge failure"},
4044 {0x10, "Authentication rejected due to timeout waiting for next "
4045 "frame in sequence"},
4046 {0x11, "Association denied because AP is unable to handle additional "
4047 "associated stations"},
4048 {0x12,
4049 "Association denied due to requesting station not supporting all "
4050 "of the datarates in the BSSBasicServiceSet Parameter"},
4051 {0x13,
4052 "Association denied due to requesting station not supporting "
4053 "short preamble operation"},
4054 {0x14,
4055 "Association denied due to requesting station not supporting "
4056 "PBCC encoding"},
4057 {0x15,
4058 "Association denied due to requesting station not supporting "
4059 "channel agility"},
4060 {0x19,
4061 "Association denied due to requesting station not supporting "
4062 "short slot operation"},
4063 {0x1A,
4064 "Association denied due to requesting station not supporting "
4065 "DSSS-OFDM operation"},
4066 {0x28, "Invalid Information Element"},
4067 {0x29, "Group Cipher is not valid"},
4068 {0x2A, "Pairwise Cipher is not valid"},
4069 {0x2B, "AKMP is not valid"},
4070 {0x2C, "Unsupported RSN IE version"},
4071 {0x2D, "Invalid RSN IE Capabilities"},
4072 {0x2E, "Cipher suite is rejected per security policy"},
4073 };
4074
4075 static const char *ipw_get_status_code(u16 status)
4076 {
4077 int i;
4078 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4079 if (ipw_status_codes[i].status == (status & 0xff))
4080 return ipw_status_codes[i].reason;
4081 return "Unknown status value.";
4082 }
4083
4084 static inline void average_init(struct average *avg)
4085 {
4086 memset(avg, 0, sizeof(*avg));
4087 }
4088
4089 #define DEPTH_RSSI 8
4090 #define DEPTH_NOISE 16
4091 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4092 {
4093 return ((depth-1)*prev_avg + val)/depth;
4094 }
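/*
 * Example of the running average above (illustrative readings): with
 * DEPTH_NOISE = 16, a previous average of -85 and a new sample of -70,
 *
 *	((16 - 1) * -85 + -70) / 16 = -1345 / 16 = -84
 *
 * (C integer division truncates toward zero), so a single outlier moves
 * the average by only about 1/depth of the difference.
 */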
4095
4096 static void average_add(struct average *avg, s16 val)
4097 {
4098 avg->sum -= avg->entries[avg->pos];
4099 avg->sum += val;
4100 avg->entries[avg->pos++] = val;
4101 if (unlikely(avg->pos == AVG_ENTRIES)) {
4102 avg->init = 1;
4103 avg->pos = 0;
4104 }
4105 }
4106
4107 static s16 average_value(struct average *avg)
4108 {
4109 if (!unlikely(avg->init)) {
4110 if (avg->pos)
4111 return avg->sum / avg->pos;
4112 return 0;
4113 }
4114
4115 return avg->sum / AVG_ENTRIES;
4116 }
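/*
 * Example of the windowed average above (assuming AVG_ENTRIES were 8,
 * purely for illustration): after only three samples 10, 20 and 30 have
 * been added, avg->init is still 0 and average_value() returns
 * (10 + 20 + 30) / 3 = 20.  Once the ring wraps, the running sum always
 * covers exactly the last AVG_ENTRIES samples, so the divisor becomes
 * fixed.
 */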
4117
4118 static void ipw_reset_stats(struct ipw_priv *priv)
4119 {
4120 u32 len = sizeof(u32);
4121
4122 priv->quality = 0;
4123
4124 average_init(&priv->average_missed_beacons);
4125 priv->exp_avg_rssi = -60;
4126 priv->exp_avg_noise = -85 + 0x100;
4127
4128 priv->last_rate = 0;
4129 priv->last_missed_beacons = 0;
4130 priv->last_rx_packets = 0;
4131 priv->last_tx_packets = 0;
4132 priv->last_tx_failures = 0;
4133
4134 /* Firmware managed, reset only when NIC is restarted, so we have to
4135 * normalize on the current value */
4136 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4137 &priv->last_rx_err, &len);
4138 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4139 &priv->last_tx_failures, &len);
4140
4141 /* Driver managed, reset with each association */
4142 priv->missed_adhoc_beacons = 0;
4143 priv->missed_beacons = 0;
4144 priv->tx_packets = 0;
4145 priv->rx_packets = 0;
4146
4147 }
4148
4149 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4150 {
4151 u32 i = 0x80000000;
4152 u32 mask = priv->rates_mask;
4153 /* If currently associated in B mode, restrict the maximum
4154 * rate match to B rates */
4155 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4156 mask &= LIBIPW_CCK_RATES_MASK;
4157
4158 /* TODO: Verify that the rate is supported by the current rates
4159 * list. */
4160
4161 while (i && !(mask & i))
4162 i >>= 1;
4163 switch (i) {
4164 case LIBIPW_CCK_RATE_1MB_MASK:
4165 return 1000000;
4166 case LIBIPW_CCK_RATE_2MB_MASK:
4167 return 2000000;
4168 case LIBIPW_CCK_RATE_5MB_MASK:
4169 return 5500000;
4170 case LIBIPW_OFDM_RATE_6MB_MASK:
4171 return 6000000;
4172 case LIBIPW_OFDM_RATE_9MB_MASK:
4173 return 9000000;
4174 case LIBIPW_CCK_RATE_11MB_MASK:
4175 return 11000000;
4176 case LIBIPW_OFDM_RATE_12MB_MASK:
4177 return 12000000;
4178 case LIBIPW_OFDM_RATE_18MB_MASK:
4179 return 18000000;
4180 case LIBIPW_OFDM_RATE_24MB_MASK:
4181 return 24000000;
4182 case LIBIPW_OFDM_RATE_36MB_MASK:
4183 return 36000000;
4184 case LIBIPW_OFDM_RATE_48MB_MASK:
4185 return 48000000;
4186 case LIBIPW_OFDM_RATE_54MB_MASK:
4187 return 54000000;
4188 }
4189
4190 if (priv->ieee->mode == IEEE_B)
4191 return 11000000;
4192 else
4193 return 54000000;
4194 }
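/*
 * Example of the mask scan above (illustrative mask): if rates_mask
 * still allows only the 1, 2, 5.5 and 11 Mbps CCK rates, the loop
 * shifts the probe bit down from 0x80000000 until it lands on the
 * highest remaining set bit (here LIBIPW_CCK_RATE_11MB_MASK), and the
 * function reports 11000000 bps.
 */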
4195
4196 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4197 {
4198 u32 rate, len = sizeof(rate);
4199 int err;
4200
4201 if (!(priv->status & STATUS_ASSOCIATED))
4202 return 0;
4203
4204 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4205 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4206 &len);
4207 if (err) {
4208 IPW_DEBUG_INFO("failed querying ordinals.\n");
4209 return 0;
4210 }
4211 } else
4212 return ipw_get_max_rate(priv);
4213
4214 switch (rate) {
4215 case IPW_TX_RATE_1MB:
4216 return 1000000;
4217 case IPW_TX_RATE_2MB:
4218 return 2000000;
4219 case IPW_TX_RATE_5MB:
4220 return 5500000;
4221 case IPW_TX_RATE_6MB:
4222 return 6000000;
4223 case IPW_TX_RATE_9MB:
4224 return 9000000;
4225 case IPW_TX_RATE_11MB:
4226 return 11000000;
4227 case IPW_TX_RATE_12MB:
4228 return 12000000;
4229 case IPW_TX_RATE_18MB:
4230 return 18000000;
4231 case IPW_TX_RATE_24MB:
4232 return 24000000;
4233 case IPW_TX_RATE_36MB:
4234 return 36000000;
4235 case IPW_TX_RATE_48MB:
4236 return 48000000;
4237 case IPW_TX_RATE_54MB:
4238 return 54000000;
4239 }
4240
4241 return 0;
4242 }
4243
4244 #define IPW_STATS_INTERVAL (2 * HZ)
4245 static void ipw_gather_stats(struct ipw_priv *priv)
4246 {
4247 u32 rx_err, rx_err_delta, rx_packets_delta;
4248 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4249 u32 missed_beacons_percent, missed_beacons_delta;
4250 u32 quality = 0;
4251 u32 len = sizeof(u32);
4252 s16 rssi;
4253 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4254 rate_quality;
4255 u32 max_rate;
4256
4257 if (!(priv->status & STATUS_ASSOCIATED)) {
4258 priv->quality = 0;
4259 return;
4260 }
4261
4262 /* Update the statistics */
4263 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4264 &priv->missed_beacons, &len);
4265 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4266 priv->last_missed_beacons = priv->missed_beacons;
4267 if (priv->assoc_request.beacon_interval) {
4268 missed_beacons_percent = missed_beacons_delta *
4269 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4270 (IPW_STATS_INTERVAL * 10);
4271 } else {
4272 missed_beacons_percent = 0;
4273 }
4274 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4275
4276 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4277 rx_err_delta = rx_err - priv->last_rx_err;
4278 priv->last_rx_err = rx_err;
4279
4280 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4281 tx_failures_delta = tx_failures - priv->last_tx_failures;
4282 priv->last_tx_failures = tx_failures;
4283
4284 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4285 priv->last_rx_packets = priv->rx_packets;
4286
4287 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4288 priv->last_tx_packets = priv->tx_packets;
4289
4290 /* Calculate quality based on the following:
4291 *
4292 * Missed beacon: 100% = 0, 0% = 70% missed
4293 * Rate: 60% = 1Mbs, 100% = Max
4294 * Rx and Tx errors represent a straight % of total Rx/Tx
4295 * RSSI: 100% = > -50, 0% = < -80
4296 * Rx errors: 100% = 0, 0% = 50% missed
4297 *
4298 * The lowest computed quality is used.
4299 *
4300 */
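/*
 * Worked example of the scaling below (illustrative numbers): if 10%
 * of beacons were missed in the last interval, beacon_quality starts at
 * 100 - 10 = 90 and is then rescaled against the 5% threshold, giving
 * (90 - 5) * 100 / (100 - 5) = 89%.  A 24 Mbps link with a 54 Mbps
 * maximum gives rate_quality = 24 * 40 / 54 + 60 = 77%.  The smallest
 * of the five sub-qualities becomes the reported link quality.
 */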
4301 #define BEACON_THRESHOLD 5
4302 beacon_quality = 100 - missed_beacons_percent;
4303 if (beacon_quality < BEACON_THRESHOLD)
4304 beacon_quality = 0;
4305 else
4306 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4307 (100 - BEACON_THRESHOLD);
4308 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4309 beacon_quality, missed_beacons_percent);
4310
4311 priv->last_rate = ipw_get_current_rate(priv);
4312 max_rate = ipw_get_max_rate(priv);
4313 rate_quality = priv->last_rate * 40 / max_rate + 60;
4314 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4315 rate_quality, priv->last_rate / 1000000);
4316
4317 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4318 rx_quality = 100 - (rx_err_delta * 100) /
4319 (rx_packets_delta + rx_err_delta);
4320 else
4321 rx_quality = 100;
4322 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4323 rx_quality, rx_err_delta, rx_packets_delta);
4324
4325 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4326 tx_quality = 100 - (tx_failures_delta * 100) /
4327 (tx_packets_delta + tx_failures_delta);
4328 else
4329 tx_quality = 100;
4330 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4331 tx_quality, tx_failures_delta, tx_packets_delta);
4332
4333 rssi = priv->exp_avg_rssi;
4334 signal_quality =
4335 (100 *
4336 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4337 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4338 (priv->ieee->perfect_rssi - rssi) *
4339 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4340 62 * (priv->ieee->perfect_rssi - rssi))) /
4341 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4342 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4343 if (signal_quality > 100)
4344 signal_quality = 100;
4345 else if (signal_quality < 1)
4346 signal_quality = 0;
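/*
 * Worked example of the RSSI mapping above (assuming the driver's usual
 * perfect_rssi = -20 dBm and worst_rssi = -85 dBm, with an averaged
 * reading of -60 dBm): p - w = 65 and p - r = 40, so
 *
 *	(100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
 *	= (422500 - 138200) / 4225 = 67
 *
 * i.e. roughly 67% signal quality before clamping.
 */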
4347
4348 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4349 signal_quality, rssi);
4350
4351 quality = min(rx_quality, signal_quality);
4352 quality = min(tx_quality, quality);
4353 quality = min(rate_quality, quality);
4354 quality = min(beacon_quality, quality);
4355 if (quality == beacon_quality)
4356 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4357 quality);
4358 if (quality == rate_quality)
4359 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4360 quality);
4361 if (quality == tx_quality)
4362 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4363 quality);
4364 if (quality == rx_quality)
4365 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4366 quality);
4367 if (quality == signal_quality)
4368 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4369 quality);
4370
4371 priv->quality = quality;
4372
4373 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4374 }
4375
4376 static void ipw_bg_gather_stats(struct work_struct *work)
4377 {
4378 struct ipw_priv *priv =
4379 container_of(work, struct ipw_priv, gather_stats.work);
4380 mutex_lock(&priv->mutex);
4381 ipw_gather_stats(priv);
4382 mutex_unlock(&priv->mutex);
4383 }
4384
4385 /* Missed beacon behavior:
4386 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4387 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4388 * Above disassociate threshold, give up and stop scanning.
4389 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
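/* For instance, with a roaming threshold of 8 and a disassociate
 * threshold of 24 (illustrative values -- the real thresholds come from
 * the driver configuration), 1-8 missed beacons are only logged, 9-24
 * trigger scan-and-roam attempts, and anything above 24 tears the
 * association down. */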
4390 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4391 int missed_count)
4392 {
4393 priv->notif_missed_beacons = missed_count;
4394
4395 if (missed_count > priv->disassociate_threshold &&
4396 priv->status & STATUS_ASSOCIATED) {
4397 /* If associated and we've hit the missed
4398 * beacon threshold, disassociate, turn
4399 * off roaming, and abort any active scans */
4400 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4401 IPW_DL_STATE | IPW_DL_ASSOC,
4402 "Missed beacon: %d - disassociate\n", missed_count);
4403 priv->status &= ~STATUS_ROAMING;
4404 if (priv->status & STATUS_SCANNING) {
4405 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4406 IPW_DL_STATE,
4407 "Aborting scan with missed beacon.\n");
4408 schedule_work(&priv->abort_scan);
4409 }
4410
4411 schedule_work(&priv->disassociate);
4412 return;
4413 }
4414
4415 if (priv->status & STATUS_ROAMING) {
4416 /* If we are currently roaming, then just
4417 * print a debug statement... */
4418 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4419 "Missed beacon: %d - roam in progress\n",
4420 missed_count);
4421 return;
4422 }
4423
4424 if (roaming &&
4425 (missed_count > priv->roaming_threshold &&
4426 missed_count <= priv->disassociate_threshold)) {
4427 /* If we are not already roaming, set the ROAM
4428 * bit in the status and kick off a scan.
4429 * This can happen several times before we reach
4430 * disassociate_threshold. */
4431 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4432 "Missed beacon: %d - initiate "
4433 "roaming\n", missed_count);
4434 if (!(priv->status & STATUS_ROAMING)) {
4435 priv->status |= STATUS_ROAMING;
4436 if (!(priv->status & STATUS_SCANNING))
4437 schedule_delayed_work(&priv->request_scan, 0);
4438 }
4439 return;
4440 }
4441
4442 if (priv->status & STATUS_SCANNING &&
4443 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4444 /* Stop scan to keep fw from getting
4445 * stuck (only if we aren't roaming --
4446 * otherwise we'll never scan more than 2 or 3
4447 * channels..) */
4448 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4449 "Aborting scan with missed beacon.\n");
4450 schedule_work(&priv->abort_scan);
4451 }
4452
4453 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4454 }
4455
4456 static void ipw_scan_event(struct work_struct *work)
4457 {
4458 union iwreq_data wrqu;
4459
4460 struct ipw_priv *priv =
4461 container_of(work, struct ipw_priv, scan_event.work);
4462
4463 wrqu.data.length = 0;
4464 wrqu.data.flags = 0;
4465 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4466 }
4467
4468 static void handle_scan_event(struct ipw_priv *priv)
4469 {
4470 /* Only userspace-requested scan completion events go out immediately */
4471 if (!priv->user_requested_scan) {
4472 if (!delayed_work_pending(&priv->scan_event))
4473 schedule_delayed_work(&priv->scan_event,
4474 round_jiffies_relative(msecs_to_jiffies(4000)));
4475 } else {
4476 union iwreq_data wrqu;
4477
4478 priv->user_requested_scan = 0;
4479 cancel_delayed_work(&priv->scan_event);
4480
4481 wrqu.data.length = 0;
4482 wrqu.data.flags = 0;
4483 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4484 }
4485 }
4486
4487 /**
4488 * Handle host notification packet.
4489 * Called from interrupt routine
4490 */
4491 static void ipw_rx_notification(struct ipw_priv *priv,
4492 struct ipw_rx_notification *notif)
4493 {
4494 DECLARE_SSID_BUF(ssid);
4495 u16 size = le16_to_cpu(notif->size);
4496
4497 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4498
4499 switch (notif->subtype) {
4500 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4501 struct notif_association *assoc = &notif->u.assoc;
4502
4503 switch (assoc->state) {
4504 case CMAS_ASSOCIATED:{
4505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4506 IPW_DL_ASSOC,
4507 "associated: '%s' %pM\n",
4508 print_ssid(ssid, priv->essid,
4509 priv->essid_len),
4510 priv->bssid);
4511
4512 switch (priv->ieee->iw_mode) {
4513 case IW_MODE_INFRA:
4514 memcpy(priv->ieee->bssid,
4515 priv->bssid, ETH_ALEN);
4516 break;
4517
4518 case IW_MODE_ADHOC:
4519 memcpy(priv->ieee->bssid,
4520 priv->bssid, ETH_ALEN);
4521
4522 /* clear out the station table */
4523 priv->num_stations = 0;
4524
4525 IPW_DEBUG_ASSOC
4526 ("queueing adhoc check\n");
4527 schedule_delayed_work(
4528 &priv->adhoc_check,
4529 le16_to_cpu(priv->
4530 assoc_request.
4531 beacon_interval));
4532 break;
4533 }
4534
4535 priv->status &= ~STATUS_ASSOCIATING;
4536 priv->status |= STATUS_ASSOCIATED;
4537 schedule_work(&priv->system_config);
4538
4539 #ifdef CONFIG_IPW2200_QOS
4540 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4541 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4542 if ((priv->status & STATUS_AUTH) &&
4543 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4544 == IEEE80211_STYPE_ASSOC_RESP)) {
4545 if ((sizeof
4546 (struct
4547 libipw_assoc_response)
4548 <= size)
4549 && (size <= 2314)) {
4550 struct
4551 libipw_rx_stats
4552 stats = {
4553 .len = size - 1,
4554 };
4555
4556 IPW_DEBUG_QOS
4557 ("QoS Associate "
4558 "size %d\n", size);
4559 libipw_rx_mgt(priv->
4560 ieee,
4561 (struct
4562 libipw_hdr_4addr
4563 *)
4564 &notif->u.raw, &stats);
4565 }
4566 }
4567 #endif
4568
4569 schedule_work(&priv->link_up);
4570
4571 break;
4572 }
4573
4574 case CMAS_AUTHENTICATED:{
4575 if (priv->
4576 status & (STATUS_ASSOCIATED |
4577 STATUS_AUTH)) {
4578 struct notif_authenticate *auth
4579 = &notif->u.auth;
4580 IPW_DEBUG(IPW_DL_NOTIF |
4581 IPW_DL_STATE |
4582 IPW_DL_ASSOC,
4583 "deauthenticated: '%s' "
4584 "%pM"
4585 ": (0x%04X) - %s\n",
4586 print_ssid(ssid,
4587 priv->
4588 essid,
4589 priv->
4590 essid_len),
4591 priv->bssid,
4592 le16_to_cpu(auth->status),
4593 ipw_get_status_code
4594 (le16_to_cpu
4595 (auth->status)));
4596
4597 priv->status &=
4598 ~(STATUS_ASSOCIATING |
4599 STATUS_AUTH |
4600 STATUS_ASSOCIATED);
4601
4602 schedule_work(&priv->link_down);
4603 break;
4604 }
4605
4606 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4607 IPW_DL_ASSOC,
4608 "authenticated: '%s' %pM\n",
4609 print_ssid(ssid, priv->essid,
4610 priv->essid_len),
4611 priv->bssid);
4612 break;
4613 }
4614
4615 case CMAS_INIT:{
4616 if (priv->status & STATUS_AUTH) {
4617 struct
4618 libipw_assoc_response
4619 *resp;
4620 resp =
4621 (struct
4622 libipw_assoc_response
4623 *)&notif->u.raw;
4624 IPW_DEBUG(IPW_DL_NOTIF |
4625 IPW_DL_STATE |
4626 IPW_DL_ASSOC,
4627 "association failed (0x%04X): %s\n",
4628 le16_to_cpu(resp->status),
4629 ipw_get_status_code
4630 (le16_to_cpu
4631 (resp->status)));
4632 }
4633
4634 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4635 IPW_DL_ASSOC,
4636 "disassociated: '%s' %pM\n",
4637 print_ssid(ssid, priv->essid,
4638 priv->essid_len),
4639 priv->bssid);
4640
4641 priv->status &=
4642 ~(STATUS_DISASSOCIATING |
4643 STATUS_ASSOCIATING |
4644 STATUS_ASSOCIATED | STATUS_AUTH);
4645 if (priv->assoc_network
4646 && (priv->assoc_network->
4647 capability &
4648 WLAN_CAPABILITY_IBSS))
4649 ipw_remove_current_network
4650 (priv);
4651
4652 schedule_work(&priv->link_down);
4653
4654 break;
4655 }
4656
4657 case CMAS_RX_ASSOC_RESP:
4658 break;
4659
4660 default:
4661 IPW_ERROR("assoc: unknown (%d)\n",
4662 assoc->state);
4663 break;
4664 }
4665
4666 break;
4667 }
4668
4669 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4670 struct notif_authenticate *auth = &notif->u.auth;
4671 switch (auth->state) {
4672 case CMAS_AUTHENTICATED:
4673 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4674 "authenticated: '%s' %pM\n",
4675 print_ssid(ssid, priv->essid,
4676 priv->essid_len),
4677 priv->bssid);
4678 priv->status |= STATUS_AUTH;
4679 break;
4680
4681 case CMAS_INIT:
4682 if (priv->status & STATUS_AUTH) {
4683 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4684 IPW_DL_ASSOC,
4685 "authentication failed (0x%04X): %s\n",
4686 le16_to_cpu(auth->status),
4687 ipw_get_status_code(le16_to_cpu
4688 (auth->
4689 status)));
4690 }
4691 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4692 IPW_DL_ASSOC,
4693 "deauthenticated: '%s' %pM\n",
4694 print_ssid(ssid, priv->essid,
4695 priv->essid_len),
4696 priv->bssid);
4697
4698 priv->status &= ~(STATUS_ASSOCIATING |
4699 STATUS_AUTH |
4700 STATUS_ASSOCIATED);
4701
4702 schedule_work(&priv->link_down);
4703 break;
4704
4705 case CMAS_TX_AUTH_SEQ_1:
4706 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4707 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4708 break;
4709 case CMAS_RX_AUTH_SEQ_2:
4710 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4711 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4712 break;
4713 case CMAS_AUTH_SEQ_1_PASS:
4714 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4715 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4716 break;
4717 case CMAS_AUTH_SEQ_1_FAIL:
4718 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4719 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4720 break;
4721 case CMAS_TX_AUTH_SEQ_3:
4722 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4723 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4724 break;
4725 case CMAS_RX_AUTH_SEQ_4:
4726 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4727 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4728 break;
4729 case CMAS_AUTH_SEQ_2_PASS:
4730 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4731 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4732 break;
4733 case CMAS_AUTH_SEQ_2_FAIL:
4734 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4735 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4736 break;
4737 case CMAS_TX_ASSOC:
4738 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4739 IPW_DL_ASSOC, "TX_ASSOC\n");
4740 break;
4741 case CMAS_RX_ASSOC_RESP:
4742 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4743 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4744
4745 break;
4746 case CMAS_ASSOCIATED:
4747 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4748 IPW_DL_ASSOC, "ASSOCIATED\n");
4749 break;
4750 default:
4751 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4752 auth->state);
4753 break;
4754 }
4755 break;
4756 }
4757
4758 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4759 struct notif_channel_result *x =
4760 &notif->u.channel_result;
4761
4762 if (size == sizeof(*x)) {
4763 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4764 x->channel_num);
4765 } else {
4766 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4767 "(should be %zd)\n",
4768 size, sizeof(*x));
4769 }
4770 break;
4771 }
4772
4773 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4774 struct notif_scan_complete *x = &notif->u.scan_complete;
4775 if (size == sizeof(*x)) {
4776 IPW_DEBUG_SCAN
4777 ("Scan completed: type %d, %d channels, "
4778 "%d status\n", x->scan_type,
4779 x->num_channels, x->status);
4780 } else {
4781 IPW_ERROR("Scan completed of wrong size %d "
4782 "(should be %zd)\n",
4783 size, sizeof(*x));
4784 }
4785
4786 priv->status &=
4787 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4788
4789 wake_up_interruptible(&priv->wait_state);
4790 cancel_delayed_work(&priv->scan_check);
4791
4792 if (priv->status & STATUS_EXIT_PENDING)
4793 break;
4794
4795 priv->ieee->scans++;
4796
4797 #ifdef CONFIG_IPW2200_MONITOR
4798 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4799 priv->status |= STATUS_SCAN_FORCED;
4800 schedule_delayed_work(&priv->request_scan, 0);
4801 break;
4802 }
4803 priv->status &= ~STATUS_SCAN_FORCED;
4804 #endif /* CONFIG_IPW2200_MONITOR */
4805
4806 /* Do queued direct scans first */
4807 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4808 schedule_delayed_work(&priv->request_direct_scan, 0);
4809
4810 if (!(priv->status & (STATUS_ASSOCIATED |
4811 STATUS_ASSOCIATING |
4812 STATUS_ROAMING |
4813 STATUS_DISASSOCIATING)))
4814 schedule_work(&priv->associate);
4815 else if (priv->status & STATUS_ROAMING) {
4816 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4817 /* If a scan completed and we are in roam mode, then
4818 * the scan that completed was the one requested as a
4819 * result of entering roam... so, schedule the
4820 * roam work */
4821 schedule_work(&priv->roam);
4822 else
4823 /* Don't schedule if we aborted the scan */
4824 priv->status &= ~STATUS_ROAMING;
4825 } else if (priv->status & STATUS_SCAN_PENDING)
4826 schedule_delayed_work(&priv->request_scan, 0);
4827 else if (priv->config & CFG_BACKGROUND_SCAN
4828 && priv->status & STATUS_ASSOCIATED)
4829 schedule_delayed_work(&priv->request_scan,
4830 round_jiffies_relative(HZ));
4831
4832 /* Send an empty event to user space.
4833 * We don't send the received data on the event because
4834 * it would require us to do complex transcoding, and
4835 * we want to minimise the work done in the irq handler.
4836 * Use a request to extract the data.
4837 * Also, we generate this event for any scan, regardless
4838 * of how the scan was initiated. User space can just
4839 * sync on periodic scan to get fresh data...
4840 * Jean II */
4841 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4842 handle_scan_event(priv);
4843 break;
4844 }
4845
4846 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4847 struct notif_frag_length *x = &notif->u.frag_len;
4848
4849 if (size == sizeof(*x))
4850 IPW_ERROR("Frag length: %d\n",
4851 le16_to_cpu(x->frag_length));
4852 else
4853 IPW_ERROR("Frag length of wrong size %d "
4854 "(should be %zd)\n",
4855 size, sizeof(*x));
4856 break;
4857 }
4858
4859 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4860 struct notif_link_deterioration *x =
4861 &notif->u.link_deterioration;
4862
4863 if (size == sizeof(*x)) {
4864 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4865 "link deterioration: type %d, cnt %d\n",
4866 x->silence_notification_type,
4867 x->silence_count);
4868 memcpy(&priv->last_link_deterioration, x,
4869 sizeof(*x));
4870 } else {
4871 IPW_ERROR("Link Deterioration of wrong size %d "
4872 "(should be %zd)\n",
4873 size, sizeof(*x));
4874 }
4875 break;
4876 }
4877
4878 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4879 IPW_ERROR("Dino config\n");
4880 if (priv->hcmd
4881 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4882 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4883
4884 break;
4885 }
4886
4887 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4888 struct notif_beacon_state *x = &notif->u.beacon_state;
4889 if (size != sizeof(*x)) {
4890 IPW_ERROR
4891 ("Beacon state of wrong size %d (should "
4892 "be %zd)\n", size, sizeof(*x));
4893 break;
4894 }
4895
4896 if (le32_to_cpu(x->state) ==
4897 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4898 ipw_handle_missed_beacon(priv,
4899 le32_to_cpu(x->
4900 number));
4901
4902 break;
4903 }
4904
4905 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4906 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4907 if (size == sizeof(*x)) {
4908 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4909 "0x%02x station %d\n",
4910 x->key_state, x->security_type,
4911 x->station_index);
4912 break;
4913 }
4914
4915 IPW_ERROR
4916 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4917 size, sizeof(*x));
4918 break;
4919 }
4920
4921 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4922 struct notif_calibration *x = &notif->u.calibration;
4923
4924 if (size == sizeof(*x)) {
4925 memcpy(&priv->calib, x, sizeof(*x));
4926 IPW_DEBUG_INFO("TODO: Calibration\n");
4927 break;
4928 }
4929
4930 IPW_ERROR
4931 ("Calibration of wrong size %d (should be %zd)\n",
4932 size, sizeof(*x));
4933 break;
4934 }
4935
4936 case HOST_NOTIFICATION_NOISE_STATS:{
4937 if (size == sizeof(u32)) {
4938 priv->exp_avg_noise =
4939 exponential_average(priv->exp_avg_noise,
4940 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4941 DEPTH_NOISE);
4942 break;
4943 }
4944
4945 IPW_ERROR
4946 ("Noise stat is wrong size %d (should be %zd)\n",
4947 size, sizeof(u32));
4948 break;
4949 }
4950
4951 default:
4952 IPW_DEBUG_NOTIF("Unknown notification: "
4953 "subtype=%d,flags=0x%2x,size=%d\n",
4954 notif->subtype, notif->flags, size);
4955 }
4956 }
4957
4958 /**
4959 * Destroys all DMA structures and initialises them again
4960 *
4961 * @param priv
4962 * @return error code
4963 */
4964 static int ipw_queue_reset(struct ipw_priv *priv)
4965 {
4966 int rc = 0;
4967 /** @todo customize queue sizes */
4968 int nTx = 64, nTxCmd = 8;
4969 ipw_tx_queue_free(priv);
4970 /* Tx CMD queue */
4971 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4972 IPW_TX_CMD_QUEUE_READ_INDEX,
4973 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4974 IPW_TX_CMD_QUEUE_BD_BASE,
4975 IPW_TX_CMD_QUEUE_BD_SIZE);
4976 if (rc) {
4977 IPW_ERROR("Tx Cmd queue init failed\n");
4978 goto error;
4979 }
4980 /* Tx queue(s) */
4981 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4982 IPW_TX_QUEUE_0_READ_INDEX,
4983 IPW_TX_QUEUE_0_WRITE_INDEX,
4984 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4985 if (rc) {
4986 IPW_ERROR("Tx 0 queue init failed\n");
4987 goto error;
4988 }
4989 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4990 IPW_TX_QUEUE_1_READ_INDEX,
4991 IPW_TX_QUEUE_1_WRITE_INDEX,
4992 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4993 if (rc) {
4994 IPW_ERROR("Tx 1 queue init failed\n");
4995 goto error;
4996 }
4997 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4998 IPW_TX_QUEUE_2_READ_INDEX,
4999 IPW_TX_QUEUE_2_WRITE_INDEX,
5000 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
5001 if (rc) {
5002 IPW_ERROR("Tx 2 queue init failed\n");
5003 goto error;
5004 }
5005 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5006 IPW_TX_QUEUE_3_READ_INDEX,
5007 IPW_TX_QUEUE_3_WRITE_INDEX,
5008 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5009 if (rc) {
5010 IPW_ERROR("Tx 3 queue init failed\n");
5011 goto error;
5012 }
5013 /* statistics */
5014 priv->rx_bufs_min = 0;
5015 priv->rx_pend_max = 0;
5016 return rc;
5017
5018 error:
5019 ipw_tx_queue_free(priv);
5020 return rc;
5021 }
5022
5023 /**
5024 * Reclaim Tx queue entries no longer used by the NIC.
5025 *
5026 * When FW advances 'R' index, all entries between old and
5027 * new 'R' index need to be reclaimed. As a result, some free space
5028 * forms. If there is enough free space (> low mark), wake Tx queue.
5029 *
5030 * @note Need to protect against garbage in 'R' index
5031 * @param priv
5032 * @param txq
5033 * @param qindex
5034 * @return Number of used entries remaining in the queue
5035 */
5036 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5037 struct clx2_tx_queue *txq, int qindex)
5038 {
5039 u32 hw_tail;
5040 int used;
5041 struct clx2_queue *q = &txq->q;
5042
5043 hw_tail = ipw_read32(priv, q->reg_r);
5044 if (hw_tail >= q->n_bd) {
5045 IPW_ERROR
5046 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5047 hw_tail, q->n_bd);
5048 goto done;
5049 }
5050 for (; q->last_used != hw_tail;
5051 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5052 ipw_queue_tx_free_tfd(priv, txq);
5053 priv->tx_packets++;
5054 }
5055 done:
5056 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5057 (qindex >= 0))
5058 netif_wake_queue(priv->net_dev);
5059 used = q->first_empty - q->last_used;
5060 if (used < 0)
5061 used += q->n_bd;
5062
5063 return used;
5064 }
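/*
 * Worked example of the reclaim above (illustrative indexes): with
 * n_bd = 64, last_used = 60, first_empty = 2 and a hardware read index
 * of 62, the loop frees the TFDs in slots 60 and 61; afterwards
 * used = 2 - 62 = -60, wrapped to -60 + 64 = 4 descriptors that are
 * still pending completion by the firmware.
 */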
5065
5066 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5067 int len, int sync)
5068 {
5069 struct clx2_tx_queue *txq = &priv->txq_cmd;
5070 struct clx2_queue *q = &txq->q;
5071 struct tfd_frame *tfd;
5072
5073 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5074 IPW_ERROR("No space for Tx\n");
5075 return -EBUSY;
5076 }
5077
5078 tfd = &txq->bd[q->first_empty];
5079 txq->txb[q->first_empty] = NULL;
5080
5081 memset(tfd, 0, sizeof(*tfd));
5082 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5083 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5084 priv->hcmd_seq++;
5085 tfd->u.cmd.index = hcmd;
5086 tfd->u.cmd.length = len;
5087 memcpy(tfd->u.cmd.payload, buf, len);
5088 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5089 ipw_write32(priv, q->reg_w, q->first_empty);
5090 _ipw_read32(priv, 0x90);
5091
5092 return 0;
5093 }
5094
5095 /*
5096 * Rx theory of operation
5097 *
5098 * The host allocates 32 DMA target addresses and passes the host address
5099 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5100 * 0 to 31
5101 *
5102 * Rx Queue Indexes
5103 * The host/firmware share two index registers for managing the Rx buffers.
5104 *
5105 * The READ index maps to the first position that the firmware may be writing
5106 * to -- the driver can read up to (but not including) this position and get
5107 * good data.
5108 * The READ index is managed by the firmware once the card is enabled.
5109 *
5110 * The WRITE index maps to the last position the driver has read from -- the
5111 * position preceding WRITE is the last slot the firmware can place a packet.
5112 *
5113 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5114 * WRITE = READ.
5115 *
5116 * During initialization the host sets up the READ queue position to the first
5117 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5118 *
5119 * When the firmware places a packet in a buffer it will advance the READ index
5120 * and fire the RX interrupt. The driver can then query the READ index and
5121 * process as many packets as possible, moving the WRITE index forward as it
5122 * resets the Rx queue buffers with new memory.
5123 *
5124 * The management in the driver is as follows:
5125 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5126 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5127 * to replenish the ipw->rxq->rx_free.
5128 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5129 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5130 * 'processed' and 'read' driver indexes as well)
5131 * + A received packet is processed and handed to the kernel network stack,
5132 * detached from the ipw->rxq. The driver 'processed' index is updated.
5133 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5134 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5135 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5136 * were enough free buffers and RX_STALLED is set it is cleared.
5137 *
5138 *
5139 * Driver sequence:
5140 *
5141 * ipw_rx_queue_alloc() Allocates rx_free
5142 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5143 * ipw_rx_queue_restock
5144 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5145 * queue, updates firmware pointers, and updates
5146 * the WRITE index. If insufficient rx_free buffers
5147 * are available, schedules ipw_rx_queue_replenish
5148 *
5149 * -- enable interrupts --
5150 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5151 * READ INDEX, detaching the SKB from the pool.
5152 * Moves the packet buffer from queue to rx_used.
5153 * Calls ipw_rx_queue_restock to refill any empty
5154 * slots.
5155 * ...
5156 *
5157 */
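/*
 * A short example of the index scheme described above, using the 32
 * RFDs mentioned there (illustrative numbers): right after
 * initialization READ = 0 and WRITE = 31, i.e. the "empty" condition
 * WRITE = READ - 1 (mod 32).  When the firmware fills three buffers it
 * advances READ to 3 and raises the RX interrupt; the driver may then
 * process slots 0-2, restock them with fresh SKBs and move WRITE
 * forward to 2, restoring the empty condition.
 */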
5158
5159 /*
5160 * If there are slots in the RX queue that need to be restocked,
5161 * and we have free pre-allocated buffers, fill the ranks as much
5162 * as we can pulling from rx_free.
5163 *
5164 * This moves the 'write' index forward to catch up with 'processed', and
5165 * also updates the memory address in the firmware to reference the new
5166 * target buffer.
5167 */
5168 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5169 {
5170 struct ipw_rx_queue *rxq = priv->rxq;
5171 struct list_head *element;
5172 struct ipw_rx_mem_buffer *rxb;
5173 unsigned long flags;
5174 int write;
5175
5176 spin_lock_irqsave(&rxq->lock, flags);
5177 write = rxq->write;
5178 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5179 element = rxq->rx_free.next;
5180 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5181 list_del(element);
5182
5183 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5184 rxb->dma_addr);
5185 rxq->queue[rxq->write] = rxb;
5186 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5187 rxq->free_count--;
5188 }
5189 spin_unlock_irqrestore(&rxq->lock, flags);
5190
5191 /* If the pre-allocated buffer pool is dropping low, schedule to
5192 * refill it */
5193 if (rxq->free_count <= RX_LOW_WATERMARK)
5194 schedule_work(&priv->rx_replenish);
5195
5196 /* If we've added more space for the firmware to place data, tell it */
5197 if (write != rxq->write)
5198 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5199 }
5200
5201 /*
5202 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5203 * Also restock the Rx queue via ipw_rx_queue_restock.
5204 *
5205 * This is called as a scheduled work item (except for during initialization)
5206 */
5207 static void ipw_rx_queue_replenish(void *data)
5208 {
5209 struct ipw_priv *priv = data;
5210 struct ipw_rx_queue *rxq = priv->rxq;
5211 struct list_head *element;
5212 struct ipw_rx_mem_buffer *rxb;
5213 unsigned long flags;
5214
5215 spin_lock_irqsave(&rxq->lock, flags);
5216 while (!list_empty(&rxq->rx_used)) {
5217 element = rxq->rx_used.next;
5218 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5219 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5220 if (!rxb->skb) {
5221 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5222 priv->net_dev->name);
5223 /* We don't reschedule replenish work here -- we will
5224 * call the restock method and if it still needs
5225 * more buffers it will schedule replenish */
5226 break;
5227 }
5228 list_del(element);
5229
5230 rxb->dma_addr =
5231 pci_map_single(priv->pci_dev, rxb->skb->data,
5232 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5233
5234 list_add_tail(&rxb->list, &rxq->rx_free);
5235 rxq->free_count++;
5236 }
5237 spin_unlock_irqrestore(&rxq->lock, flags);
5238
5239 ipw_rx_queue_restock(priv);
5240 }
5241
5242 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5243 {
5244 struct ipw_priv *priv =
5245 container_of(work, struct ipw_priv, rx_replenish);
5246 mutex_lock(&priv->mutex);
5247 ipw_rx_queue_replenish(priv);
5248 mutex_unlock(&priv->mutex);
5249 }
5250
5251 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5252 * If an SKB has been detached, the POOL entry needs to have its SKB set to NULL.
5253 * This free routine walks the list of POOL entries and, if the SKB is
5254 * non-NULL, unmaps and frees it.
5255 */
5256 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5257 {
5258 int i;
5259
5260 if (!rxq)
5261 return;
5262
5263 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5264 if (rxq->pool[i].skb != NULL) {
5265 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5266 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5267 dev_kfree_skb(rxq->pool[i].skb);
5268 }
5269 }
5270
5271 kfree(rxq);
5272 }
5273
5274 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5275 {
5276 struct ipw_rx_queue *rxq;
5277 int i;
5278
5279 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5280 if (unlikely(!rxq)) {
5281 IPW_ERROR("memory allocation failed\n");
5282 return NULL;
5283 }
5284 spin_lock_init(&rxq->lock);
5285 INIT_LIST_HEAD(&rxq->rx_free);
5286 INIT_LIST_HEAD(&rxq->rx_used);
5287
5288 /* Fill the rx_used queue with _all_ of the Rx buffers */
5289 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5290 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5291
5292 /* Set us so that we have processed and used all buffers, but have
5293 * not restocked the Rx queue with fresh buffers */
5294 rxq->read = rxq->write = 0;
5295 rxq->free_count = 0;
5296
5297 return rxq;
5298 }
5299
5300 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5301 {
5302 rate &= ~LIBIPW_BASIC_RATE_MASK;
5303 if (ieee_mode == IEEE_A) {
5304 switch (rate) {
5305 case LIBIPW_OFDM_RATE_6MB:
5306 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5307 1 : 0;
5308 case LIBIPW_OFDM_RATE_9MB:
5309 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5310 1 : 0;
5311 case LIBIPW_OFDM_RATE_12MB:
5312 return priv->
5313 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5314 case LIBIPW_OFDM_RATE_18MB:
5315 return priv->
5316 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5317 case LIBIPW_OFDM_RATE_24MB:
5318 return priv->
5319 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5320 case LIBIPW_OFDM_RATE_36MB:
5321 return priv->
5322 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5323 case LIBIPW_OFDM_RATE_48MB:
5324 return priv->
5325 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5326 case LIBIPW_OFDM_RATE_54MB:
5327 return priv->
5328 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5329 default:
5330 return 0;
5331 }
5332 }
5333
5334 /* B and G mixed */
5335 switch (rate) {
5336 case LIBIPW_CCK_RATE_1MB:
5337 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5338 case LIBIPW_CCK_RATE_2MB:
5339 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5340 case LIBIPW_CCK_RATE_5MB:
5341 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5342 case LIBIPW_CCK_RATE_11MB:
5343 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5344 }
5345
5346 /* If we are limited to B modulations, bail at this point */
5347 if (ieee_mode == IEEE_B)
5348 return 0;
5349
5350 /* G */
5351 switch (rate) {
5352 case LIBIPW_OFDM_RATE_6MB:
5353 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5354 case LIBIPW_OFDM_RATE_9MB:
5355 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5356 case LIBIPW_OFDM_RATE_12MB:
5357 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5358 case LIBIPW_OFDM_RATE_18MB:
5359 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5360 case LIBIPW_OFDM_RATE_24MB:
5361 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5362 case LIBIPW_OFDM_RATE_36MB:
5363 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5364 case LIBIPW_OFDM_RATE_48MB:
5365 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5366 case LIBIPW_OFDM_RATE_54MB:
5367 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5368 }
5369
5370 return 0;
5371 }
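/*
 * Example of the rate test above (standard 802.11 rate encoding): an AP
 * advertising "11 Mbps, basic" encodes it as 0x96 -- 22 units of
 * 500 kbps with the basic-rate flag set.  Stripping
 * LIBIPW_BASIC_RATE_MASK leaves LIBIPW_CCK_RATE_11MB (0x16), which is
 * then checked against LIBIPW_CCK_RATE_11MB_MASK in priv->rates_mask.
 */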
5372
5373 static int ipw_compatible_rates(struct ipw_priv *priv,
5374 const struct libipw_network *network,
5375 struct ipw_supported_rates *rates)
5376 {
5377 int num_rates, i;
5378
5379 memset(rates, 0, sizeof(*rates));
5380 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5381 rates->num_rates = 0;
5382 for (i = 0; i < num_rates; i++) {
5383 if (!ipw_is_rate_in_mask(priv, network->mode,
5384 network->rates[i])) {
5385
5386 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5387 IPW_DEBUG_SCAN("Adding masked mandatory "
5388 "rate %02X\n",
5389 network->rates[i]);
5390 rates->supported_rates[rates->num_rates++] =
5391 network->rates[i];
5392 continue;
5393 }
5394
5395 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5396 network->rates[i], priv->rates_mask);
5397 continue;
5398 }
5399
5400 rates->supported_rates[rates->num_rates++] = network->rates[i];
5401 }
5402
5403 num_rates = min(network->rates_ex_len,
5404 (u8) (IPW_MAX_RATES - num_rates));
5405 for (i = 0; i < num_rates; i++) {
5406 if (!ipw_is_rate_in_mask(priv, network->mode,
5407 network->rates_ex[i])) {
5408 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5409 IPW_DEBUG_SCAN("Adding masked mandatory "
5410 "rate %02X\n",
5411 network->rates_ex[i]);
5412 rates->supported_rates[rates->num_rates++] =
5413 network->rates_ex[i];
5414 continue;
5415 }
5416
5417 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5418 network->rates_ex[i], priv->rates_mask);
5419 continue;
5420 }
5421
5422 rates->supported_rates[rates->num_rates++] =
5423 network->rates_ex[i];
5424 }
5425
5426 return 1;
5427 }
5428
5429 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5430 const struct ipw_supported_rates *src)
5431 {
5432 u8 i;
5433 for (i = 0; i < src->num_rates; i++)
5434 dest->supported_rates[i] = src->supported_rates[i];
5435 dest->num_rates = src->num_rates;
5436 }
5437
5438 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5439 * mask should ever be used -- right now all callers add the scan rates with
5440 * modulation = CCK, so BASIC_RATE_MASK is never set... */
5441 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5442 u8 modulation, u32 rate_mask)
5443 {
5444 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5445 LIBIPW_BASIC_RATE_MASK : 0;
5446
5447 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5448 rates->supported_rates[rates->num_rates++] =
5449 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5450
5451 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5452 rates->supported_rates[rates->num_rates++] =
5453 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5454
5455 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5456 rates->supported_rates[rates->num_rates++] = basic_mask |
5457 LIBIPW_CCK_RATE_5MB;
5458
5459 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5460 rates->supported_rates[rates->num_rates++] = basic_mask |
5461 LIBIPW_CCK_RATE_11MB;
5462 }
5463
5464 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5465 u8 modulation, u32 rate_mask)
5466 {
5467 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5468 LIBIPW_BASIC_RATE_MASK : 0;
5469
5470 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5471 rates->supported_rates[rates->num_rates++] = basic_mask |
5472 LIBIPW_OFDM_RATE_6MB;
5473
5474 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5475 rates->supported_rates[rates->num_rates++] =
5476 LIBIPW_OFDM_RATE_9MB;
5477
5478 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5479 rates->supported_rates[rates->num_rates++] = basic_mask |
5480 LIBIPW_OFDM_RATE_12MB;
5481
5482 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5483 rates->supported_rates[rates->num_rates++] =
5484 LIBIPW_OFDM_RATE_18MB;
5485
5486 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5487 rates->supported_rates[rates->num_rates++] = basic_mask |
5488 LIBIPW_OFDM_RATE_24MB;
5489
5490 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5491 rates->supported_rates[rates->num_rates++] =
5492 LIBIPW_OFDM_RATE_36MB;
5493
5494 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5495 rates->supported_rates[rates->num_rates++] =
5496 LIBIPW_OFDM_RATE_48MB;
5497
5498 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5499 rates->supported_rates[rates->num_rates++] =
5500 LIBIPW_OFDM_RATE_54MB;
5501 }
5502
5503 struct ipw_network_match {
5504 struct libipw_network *network;
5505 struct ipw_supported_rates rates;
5506 };
5507
5508 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5509 struct ipw_network_match *match,
5510 struct libipw_network *network,
5511 int roaming)
5512 {
5513 struct ipw_supported_rates rates;
5514 DECLARE_SSID_BUF(ssid);
5515
5516 /* Verify that this network's capability is compatible with the
5517 * current mode (AdHoc or Infrastructure) */
5518 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5519 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5520 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5521 "capability mismatch.\n",
5522 print_ssid(ssid, network->ssid,
5523 network->ssid_len),
5524 network->bssid);
5525 return 0;
5526 }
5527
5528 if (unlikely(roaming)) {
5529 /* If we are roaming, then check whether this is a valid
5530 * network to try and roam to */
5531 if ((network->ssid_len != match->network->ssid_len) ||
5532 memcmp(network->ssid, match->network->ssid,
5533 network->ssid_len)) {
5534 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5535 "because of non-network ESSID.\n",
5536 print_ssid(ssid, network->ssid,
5537 network->ssid_len),
5538 network->bssid);
5539 return 0;
5540 }
5541 } else {
5542 /* If an ESSID has been configured then compare the broadcast
5543 * ESSID to ours */
5544 if ((priv->config & CFG_STATIC_ESSID) &&
5545 ((network->ssid_len != priv->essid_len) ||
5546 memcmp(network->ssid, priv->essid,
5547 min(network->ssid_len, priv->essid_len)))) {
5548 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5549
5550 strncpy(escaped,
5551 print_ssid(ssid, network->ssid,
5552 network->ssid_len),
5553 sizeof(escaped));
5554 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5555 "because of ESSID mismatch: '%s'.\n",
5556 escaped, network->bssid,
5557 print_ssid(ssid, priv->essid,
5558 priv->essid_len));
5559 return 0;
5560 }
5561 }
5562
5563 /* If the candidate network's timestamp is older than the current
5564 * match's, don't bother testing everything else. */
5565
5566 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5567 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5568 "current network.\n",
5569 print_ssid(ssid, match->network->ssid,
5570 match->network->ssid_len));
5571 return 0;
5572 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5573 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5574 "current network.\n",
5575 print_ssid(ssid, match->network->ssid,
5576 match->network->ssid_len));
5577 return 0;
5578 }
5579
5580 /* Now go through and see if the requested network is valid... */
5581 if (priv->ieee->scan_age != 0 &&
5582 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5583 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5584 "because of age: %ums.\n",
5585 print_ssid(ssid, network->ssid,
5586 network->ssid_len),
5587 network->bssid,
5588 jiffies_to_msecs(jiffies -
5589 network->last_scanned));
5590 return 0;
5591 }
5592
5593 if ((priv->config & CFG_STATIC_CHANNEL) &&
5594 (network->channel != priv->channel)) {
5595 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5596 "because of channel mismatch: %d != %d.\n",
5597 print_ssid(ssid, network->ssid,
5598 network->ssid_len),
5599 network->bssid,
5600 network->channel, priv->channel);
5601 return 0;
5602 }
5603
5604 /* Verify privacy compatibility */
5605 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5606 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5607 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5608 "because of privacy mismatch: %s != %s.\n",
5609 print_ssid(ssid, network->ssid,
5610 network->ssid_len),
5611 network->bssid,
5612 priv->
5613 capability & CAP_PRIVACY_ON ? "on" : "off",
5614 network->
5615 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5616 "off");
5617 return 0;
5618 }
5619
5620 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5621 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5622 "because of the same BSSID match: %pM"
5623 ".\n", print_ssid(ssid, network->ssid,
5624 network->ssid_len),
5625 network->bssid,
5626 priv->bssid);
5627 return 0;
5628 }
5629
5630 /* Filter out any incompatible freq / mode combinations */
5631 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5632 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5633 "because of invalid frequency/mode "
5634 "combination.\n",
5635 print_ssid(ssid, network->ssid,
5636 network->ssid_len),
5637 network->bssid);
5638 return 0;
5639 }
5640
5641 /* Ensure that the rates supported by the driver are compatible with
5642 * this AP, including verification of basic rates (mandatory) */
5643 if (!ipw_compatible_rates(priv, network, &rates)) {
5644 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5645 "because configured rate mask excludes "
5646 "AP mandatory rate.\n",
5647 print_ssid(ssid, network->ssid,
5648 network->ssid_len),
5649 network->bssid);
5650 return 0;
5651 }
5652
5653 if (rates.num_rates == 0) {
5654 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5655 "because of no compatible rates.\n",
5656 print_ssid(ssid, network->ssid,
5657 network->ssid_len),
5658 network->bssid);
5659 return 0;
5660 }
5661
5662 /* TODO: Perform any further minimal comparative tests. We do not
5663 * want to put too much policy logic here; intelligent scan selection
5664 * should occur within a generic IEEE 802.11 user space tool. */
5665
5666 /* Set up 'new' AP to this network */
5667 ipw_copy_rates(&match->rates, &rates);
5668 match->network = network;
5669 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5670 print_ssid(ssid, network->ssid, network->ssid_len),
5671 network->bssid);
5672
5673 return 1;
5674 }
5675
5676 static void ipw_merge_adhoc_network(struct work_struct *work)
5677 {
5678 DECLARE_SSID_BUF(ssid);
5679 struct ipw_priv *priv =
5680 container_of(work, struct ipw_priv, merge_networks);
5681 struct libipw_network *network = NULL;
5682 struct ipw_network_match match = {
5683 .network = priv->assoc_network
5684 };
5685
5686 if ((priv->status & STATUS_ASSOCIATED) &&
5687 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5688 /* First pass through ROAM process -- look for a better
5689 * network */
5690 unsigned long flags;
5691
5692 spin_lock_irqsave(&priv->ieee->lock, flags);
5693 list_for_each_entry(network, &priv->ieee->network_list, list) {
5694 if (network != priv->assoc_network)
5695 ipw_find_adhoc_network(priv, &match, network,
5696 1);
5697 }
5698 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5699
5700 if (match.network == priv->assoc_network) {
5701 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5702 "merge to.\n");
5703 return;
5704 }
5705
5706 mutex_lock(&priv->mutex);
5707 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5708 IPW_DEBUG_MERGE("remove network %s\n",
5709 print_ssid(ssid, priv->essid,
5710 priv->essid_len));
5711 ipw_remove_current_network(priv);
5712 }
5713
5714 ipw_disassociate(priv);
5715 priv->assoc_network = match.network;
5716 mutex_unlock(&priv->mutex);
5717 return;
5718 }
5719 }
5720
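/*
 * Decide whether 'network' is a better association candidate than the
 * current 'match'.  Candidates are rejected on capability, ESSID,
 * signal strength, recent association attempts, scan age, channel,
 * privacy, BSSID, mode/geography and rate incompatibilities; a network
 * that passes all checks becomes the new match, together with the set
 * of mutually supported rates.
 */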
5721 static int ipw_best_network(struct ipw_priv *priv,
5722 struct ipw_network_match *match,
5723 struct libipw_network *network, int roaming)
5724 {
5725 struct ipw_supported_rates rates;
5726 DECLARE_SSID_BUF(ssid);
5727
5728 /* Verify that this network's capability is compatible with the
5729 * current mode (AdHoc or Infrastructure) */
5730 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5731 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5732 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5733 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5734 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5735 "capability mismatch.\n",
5736 print_ssid(ssid, network->ssid,
5737 network->ssid_len),
5738 network->bssid);
5739 return 0;
5740 }
5741
5742 if (unlikely(roaming)) {
5743 /* If we are roaming, check whether this is a valid
5744 * network to try to roam to */
5745 if ((network->ssid_len != match->network->ssid_len) ||
5746 memcmp(network->ssid, match->network->ssid,
5747 network->ssid_len)) {
5748 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5749 "because of non-network ESSID.\n",
5750 print_ssid(ssid, network->ssid,
5751 network->ssid_len),
5752 network->bssid);
5753 return 0;
5754 }
5755 } else {
5756 /* If an ESSID has been configured then compare the broadcast
5757 * ESSID to ours */
5758 if ((priv->config & CFG_STATIC_ESSID) &&
5759 ((network->ssid_len != priv->essid_len) ||
5760 memcmp(network->ssid, priv->essid,
5761 min(network->ssid_len, priv->essid_len)))) {
5762 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5763 strncpy(escaped,
5764 print_ssid(ssid, network->ssid,
5765 network->ssid_len),
5766 sizeof(escaped));
5767 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5768 "because of ESSID mismatch: '%s'.\n",
5769 escaped, network->bssid,
5770 print_ssid(ssid, priv->essid,
5771 priv->essid_len));
5772 return 0;
5773 }
5774 }
5775
5776 /* If the old network's signal is stronger than this one's, don't
5777 * bother testing everything else. */
5778 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5779 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5780 strncpy(escaped,
5781 print_ssid(ssid, network->ssid, network->ssid_len),
5782 sizeof(escaped));
5783 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5784 "'%s (%pM)' has a stronger signal.\n",
5785 escaped, network->bssid,
5786 print_ssid(ssid, match->network->ssid,
5787 match->network->ssid_len),
5788 match->network->bssid);
5789 return 0;
5790 }
5791
5792 /* If this network has already had an association attempt within the
5793 * last 3 seconds, do not try to associate again... */
5794 if (network->last_associate &&
5795 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5796 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5797 "because of storming (%ums since last "
5798 "assoc attempt).\n",
5799 print_ssid(ssid, network->ssid,
5800 network->ssid_len),
5801 network->bssid,
5802 jiffies_to_msecs(jiffies -
5803 network->last_associate));
5804 return 0;
5805 }
5806
5807 /* Now go through and see if the requested network is valid... */
5808 if (priv->ieee->scan_age != 0 &&
5809 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5810 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5811 "because of age: %ums.\n",
5812 print_ssid(ssid, network->ssid,
5813 network->ssid_len),
5814 network->bssid,
5815 jiffies_to_msecs(jiffies -
5816 network->last_scanned));
5817 return 0;
5818 }
5819
5820 if ((priv->config & CFG_STATIC_CHANNEL) &&
5821 (network->channel != priv->channel)) {
5822 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5823 "because of channel mismatch: %d != %d.\n",
5824 print_ssid(ssid, network->ssid,
5825 network->ssid_len),
5826 network->bssid,
5827 network->channel, priv->channel);
5828 return 0;
5829 }
5830
5831 /* Verify privacy compatibility */
5832 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5833 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5834 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5835 "because of privacy mismatch: %s != %s.\n",
5836 print_ssid(ssid, network->ssid,
5837 network->ssid_len),
5838 network->bssid,
5839 priv->capability & CAP_PRIVACY_ON ? "on" :
5840 "off",
5841 network->capability &
5842 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5843 return 0;
5844 }
5845
5846 if ((priv->config & CFG_STATIC_BSSID) &&
5847 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5848 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5849 "because of BSSID mismatch: %pM.\n",
5850 print_ssid(ssid, network->ssid,
5851 network->ssid_len),
5852 network->bssid, priv->bssid);
5853 return 0;
5854 }
5855
5856 /* Filter out any incompatible freq / mode combinations */
5857 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5858 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5859 "because of invalid frequency/mode "
5860 "combination.\n",
5861 print_ssid(ssid, network->ssid,
5862 network->ssid_len),
5863 network->bssid);
5864 return 0;
5865 }
5866
5867 /* Filter out invalid channel in current GEO */
5868 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5869 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5870 "because of invalid channel in current GEO\n",
5871 print_ssid(ssid, network->ssid,
5872 network->ssid_len),
5873 network->bssid);
5874 return 0;
5875 }
5876
5877 /* Ensure that the rates supported by the driver are compatible with
5878 * this AP, including verification of basic rates (mandatory) */
5879 if (!ipw_compatible_rates(priv, network, &rates)) {
5880 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5881 "because configured rate mask excludes "
5882 "AP mandatory rate.\n",
5883 print_ssid(ssid, network->ssid,
5884 network->ssid_len),
5885 network->bssid);
5886 return 0;
5887 }
5888
5889 if (rates.num_rates == 0) {
5890 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5891 "because of no compatible rates.\n",
5892 print_ssid(ssid, network->ssid,
5893 network->ssid_len),
5894 network->bssid);
5895 return 0;
5896 }
5897
5898 /* TODO: Perform any further minimal comparative tests. We do not
5899 * want to put too much policy logic here; intelligent scan selection
5900 * should occur within a generic IEEE 802.11 user space tool. */
5901
5902 /* Set up 'new' AP to this network */
5903 ipw_copy_rates(&match->rates, &rates);
5904 match->network = network;
5905
5906 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5907 print_ssid(ssid, network->ssid, network->ssid_len),
5908 network->bssid);
5909
5910 return 1;
5911 }
5912
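/*
 * Fill in a libipw_network structure describing the IBSS we are about
 * to create: choose a band and channel that are valid for the current
 * geography (falling back to the first usable channel if necessary),
 * create a BSSID, and populate the SSID, capability bits, supported
 * rates and default beacon/ATIM parameters.
 */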
5913 static void ipw_adhoc_create(struct ipw_priv *priv,
5914 struct libipw_network *network)
5915 {
5916 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5917 int i;
5918
5919 /*
5920 * For the purposes of scanning, we can set our wireless mode
5921 * to trigger scans across combinations of bands, but when it
5922 * comes to creating a new ad-hoc network, we have to tell the FW
5923 * exactly which band to use.
5924 *
5925 * We also have the possibility of an invalid channel for the
5926 * chosen band. Attempting to create a new ad-hoc network
5927 * with an invalid channel for wireless mode will trigger a
5928 * FW fatal error.
5929 *
5930 */
5931 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5932 case LIBIPW_52GHZ_BAND:
5933 network->mode = IEEE_A;
5934 i = libipw_channel_to_index(priv->ieee, priv->channel);
5935 BUG_ON(i == -1);
5936 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5937 IPW_WARNING("Overriding invalid channel\n");
5938 priv->channel = geo->a[0].channel;
5939 }
5940 break;
5941
5942 case LIBIPW_24GHZ_BAND:
5943 if (priv->ieee->mode & IEEE_G)
5944 network->mode = IEEE_G;
5945 else
5946 network->mode = IEEE_B;
5947 i = libipw_channel_to_index(priv->ieee, priv->channel);
5948 BUG_ON(i == -1);
5949 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5950 IPW_WARNING("Overriding invalid channel\n");
5951 priv->channel = geo->bg[0].channel;
5952 }
5953 break;
5954
5955 default:
5956 IPW_WARNING("Overriding invalid channel\n");
5957 if (priv->ieee->mode & IEEE_A) {
5958 network->mode = IEEE_A;
5959 priv->channel = geo->a[0].channel;
5960 } else if (priv->ieee->mode & IEEE_G) {
5961 network->mode = IEEE_G;
5962 priv->channel = geo->bg[0].channel;
5963 } else {
5964 network->mode = IEEE_B;
5965 priv->channel = geo->bg[0].channel;
5966 }
5967 break;
5968 }
5969
5970 network->channel = priv->channel;
5971 priv->config |= CFG_ADHOC_PERSIST;
5972 ipw_create_bssid(priv, network->bssid);
5973 network->ssid_len = priv->essid_len;
5974 memcpy(network->ssid, priv->essid, priv->essid_len);
5975 memset(&network->stats, 0, sizeof(network->stats));
5976 network->capability = WLAN_CAPABILITY_IBSS;
5977 if (!(priv->config & CFG_PREAMBLE_LONG))
5978 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5979 if (priv->capability & CAP_PRIVACY_ON)
5980 network->capability |= WLAN_CAPABILITY_PRIVACY;
5981 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5982 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5983 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5984 memcpy(network->rates_ex,
5985 &priv->rates.supported_rates[network->rates_len],
5986 network->rates_ex_len);
5987 network->last_scanned = 0;
5988 network->flags = 0;
5989 network->last_associate = 0;
5990 network->time_stamp[0] = 0;
5991 network->time_stamp[1] = 0;
5992 network->beacon_interval = 100; /* Default */
5993 network->listen_interval = 10; /* Default */
5994 network->atim_window = 0; /* Default */
5995 network->wpa_ie_len = 0;
5996 network->rsn_ie_len = 0;
5997 }
5998
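/*
 * Download the transmit key in slot 'index' to the firmware via
 * IPW_CMD_TGI_TX_KEY.  'type' selects the security type (TKIP or
 * CCMP); nothing is sent if no key is configured for that slot.
 */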
5999 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
6000 {
6001 struct ipw_tgi_tx_key key;
6002
6003 if (!(priv->ieee->sec.flags & (1 << index)))
6004 return;
6005
6006 key.key_id = index;
6007 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
6008 key.security_type = type;
6009 key.station_index = 0; /* always 0 for BSS */
6010 key.flags = 0;
6011 /* 0 for new key; previous value of counter (after fatal error) */
6012 key.tx_counter[0] = cpu_to_le32(0);
6013 key.tx_counter[1] = cpu_to_le32(0);
6014
6015 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6016 }
6017
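/*
 * Download the keys for all four key slots to the firmware via
 * IPW_CMD_WEP_KEY.  'type' is OR'd into the key index to select WEP or
 * CCMP handling; slots without a configured key are skipped.
 */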
6018 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6019 {
6020 struct ipw_wep_key key;
6021 int i;
6022
6023 key.cmd_id = DINO_CMD_WEP_KEY;
6024 key.seq_num = 0;
6025
6026 /* Note: AES keys cannot be set multiple times.
6027 * Only set them the first time. */
6028 for (i = 0; i < 4; i++) {
6029 key.key_index = i | type;
6030 if (!(priv->ieee->sec.flags & (1 << i))) {
6031 key.key_size = 0;
6032 continue;
6033 }
6034
6035 key.key_size = priv->ieee->sec.key_sizes[i];
6036 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6037
6038 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6039 }
6040 }
6041
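/*
 * Tell the firmware whether it should decrypt unicast frames itself,
 * based on the negotiated security level.  For TKIP (SEC_LEVEL_2)
 * decryption stays on the host, since the hardware cannot handle the
 * TKIP MIC.  The helper below does the same for multicast traffic.
 */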
6042 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6043 {
6044 if (priv->ieee->host_encrypt)
6045 return;
6046
6047 switch (level) {
6048 case SEC_LEVEL_3:
6049 priv->sys_config.disable_unicast_decryption = 0;
6050 priv->ieee->host_decrypt = 0;
6051 break;
6052 case SEC_LEVEL_2:
6053 priv->sys_config.disable_unicast_decryption = 1;
6054 priv->ieee->host_decrypt = 1;
6055 break;
6056 case SEC_LEVEL_1:
6057 priv->sys_config.disable_unicast_decryption = 0;
6058 priv->ieee->host_decrypt = 0;
6059 break;
6060 case SEC_LEVEL_0:
6061 priv->sys_config.disable_unicast_decryption = 1;
6062 break;
6063 default:
6064 break;
6065 }
6066 }
6067
6068 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6069 {
6070 if (priv->ieee->host_encrypt)
6071 return;
6072
6073 switch (level) {
6074 case SEC_LEVEL_3:
6075 priv->sys_config.disable_multicast_decryption = 0;
6076 break;
6077 case SEC_LEVEL_2:
6078 priv->sys_config.disable_multicast_decryption = 1;
6079 break;
6080 case SEC_LEVEL_1:
6081 priv->sys_config.disable_multicast_decryption = 0;
6082 break;
6083 case SEC_LEVEL_0:
6084 priv->sys_config.disable_multicast_decryption = 1;
6085 break;
6086 default:
6087 break;
6088 }
6089 }
6090
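/*
 * Load the keys matching the current security level into the firmware:
 * the active CCMP TX key (and the CCMP group keys, when the host is
 * not doing multicast decryption) for level 3, the active TKIP TX key
 * for level 2, and the WEP keys plus hardware decryption settings for
 * level 1.
 */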
6091 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6092 {
6093 switch (priv->ieee->sec.level) {
6094 case SEC_LEVEL_3:
6095 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6096 ipw_send_tgi_tx_key(priv,
6097 DCT_FLAG_EXT_SECURITY_CCM,
6098 priv->ieee->sec.active_key);
6099
6100 if (!priv->ieee->host_mc_decrypt)
6101 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6102 break;
6103 case SEC_LEVEL_2:
6104 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6105 ipw_send_tgi_tx_key(priv,
6106 DCT_FLAG_EXT_SECURITY_TKIP,
6107 priv->ieee->sec.active_key);
6108 break;
6109 case SEC_LEVEL_1:
6110 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6111 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6112 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6113 break;
6114 case SEC_LEVEL_0:
6115 default:
6116 break;
6117 }
6118 }
6119
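/*
 * Periodic ad-hoc link check: if more beacons have been missed than
 * the disassociate threshold and CFG_ADHOC_PERSIST is not set, drop
 * the current network and disassociate; otherwise re-arm the check for
 * another beacon interval.
 */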
6120 static void ipw_adhoc_check(void *data)
6121 {
6122 struct ipw_priv *priv = data;
6123
6124 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6125 !(priv->config & CFG_ADHOC_PERSIST)) {
6126 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6127 IPW_DL_STATE | IPW_DL_ASSOC,
6128 "Missed beacon: %d - disassociate\n",
6129 priv->missed_adhoc_beacons);
6130 ipw_remove_current_network(priv);
6131 ipw_disassociate(priv);
6132 return;
6133 }
6134
6135 schedule_delayed_work(&priv->adhoc_check,
6136 le16_to_cpu(priv->assoc_request.beacon_interval));
6137 }
6138
6139 static void ipw_bg_adhoc_check(struct work_struct *work)
6140 {
6141 struct ipw_priv *priv =
6142 container_of(work, struct ipw_priv, adhoc_check.work);
6143 mutex_lock(&priv->mutex);
6144 ipw_adhoc_check(priv);
6145 mutex_unlock(&priv->mutex);
6146 }
6147
6148 static void ipw_debug_config(struct ipw_priv *priv)
6149 {
6150 DECLARE_SSID_BUF(ssid);
6151 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6152 "[CFG 0x%08X]\n", priv->config);
6153 if (priv->config & CFG_STATIC_CHANNEL)
6154 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6155 else
6156 IPW_DEBUG_INFO("Channel unlocked.\n");
6157 if (priv->config & CFG_STATIC_ESSID)
6158 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6159 print_ssid(ssid, priv->essid, priv->essid_len));
6160 else
6161 IPW_DEBUG_INFO("ESSID unlocked.\n");
6162 if (priv->config & CFG_STATIC_BSSID)
6163 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6164 else
6165 IPW_DEBUG_INFO("BSSID unlocked.\n");
6166 if (priv->capability & CAP_PRIVACY_ON)
6167 IPW_DEBUG_INFO("PRIVACY on\n");
6168 else
6169 IPW_DEBUG_INFO("PRIVACY off\n");
6170 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6171 }
6172
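/*
 * Translate the user's fixed TX rate mask into the rate encoding used
 * by the band the firmware is currently operating in and write it to
 * the firmware's fixed rate override location (whose address is read
 * from IPW_MEM_FIXED_OVERRIDE).  An invalid mask is written as 0.
 */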
6173 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6174 {
6175 /* TODO: Verify that this works... */
6176 struct ipw_fixed_rate fr;
6177 u32 reg;
6178 u16 mask = 0;
6179 u16 new_tx_rates = priv->rates_mask;
6180
6181 /* Identify 'current FW band' and match it with the fixed
6182 * Tx rates */
6183
6184 switch (priv->ieee->freq_band) {
6185 case LIBIPW_52GHZ_BAND: /* A only */
6186 /* IEEE_A */
6187 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6188 /* Invalid fixed rate mask */
6189 IPW_DEBUG_WX
6190 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6191 new_tx_rates = 0;
6192 break;
6193 }
6194
6195 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6196 break;
6197
6198 default: /* 2.4Ghz or Mixed */
6199 /* IEEE_B */
6200 if (mode == IEEE_B) {
6201 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6202 /* Invalid fixed rate mask */
6203 IPW_DEBUG_WX
6204 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6205 new_tx_rates = 0;
6206 }
6207 break;
6208 }
6209
6210 /* IEEE_G */
6211 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6212 LIBIPW_OFDM_RATES_MASK)) {
6213 /* Invalid fixed rate mask */
6214 IPW_DEBUG_WX
6215 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6216 new_tx_rates = 0;
6217 break;
6218 }
6219
6220 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6221 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6222 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6223 }
6224
6225 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6226 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6227 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6228 }
6229
6230 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6231 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6232 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6233 }
6234
6235 new_tx_rates |= mask;
6236 break;
6237 }
6238
6239 fr.tx_rates = cpu_to_le16(new_tx_rates);
6240
6241 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6242 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6243 }
6244
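/*
 * Ask the firmware to abort the scan currently in progress.  The
 * STATUS_SCAN_ABORTING bit prevents a second abort from being issued
 * while one is already pending.
 */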
6245 static void ipw_abort_scan(struct ipw_priv *priv)
6246 {
6247 int err;
6248
6249 if (priv->status & STATUS_SCAN_ABORTING) {
6250 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6251 return;
6252 }
6253 priv->status |= STATUS_SCAN_ABORTING;
6254
6255 err = ipw_send_scan_abort(priv);
6256 if (err)
6257 IPW_DEBUG_HC("Request to abort scan failed.\n");
6258 }
6259
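/*
 * Fill in the channel list of a scan request.  Each band's channels
 * are preceded by a header byte holding the band (IPW_A_MODE or
 * IPW_B_MODE in the upper bits) and the number of channels that
 * follow.  The channel we are currently associated on is skipped, and
 * passive-only channels are forced to a passive scan type.  With
 * CFG_SPEED_SCAN the 2.4 GHz channels are taken from the rotating
 * speed_scan list instead.
 */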
6260 static void ipw_add_scan_channels(struct ipw_priv *priv,
6261 struct ipw_scan_request_ext *scan,
6262 int scan_type)
6263 {
6264 int channel_index = 0;
6265 const struct libipw_geo *geo;
6266 int i;
6267
6268 geo = libipw_get_geo(priv->ieee);
6269
6270 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6271 int start = channel_index;
6272 for (i = 0; i < geo->a_channels; i++) {
6273 if ((priv->status & STATUS_ASSOCIATED) &&
6274 geo->a[i].channel == priv->channel)
6275 continue;
6276 channel_index++;
6277 scan->channels_list[channel_index] = geo->a[i].channel;
6278 ipw_set_scan_type(scan, channel_index,
6279 geo->a[i].
6280 flags & LIBIPW_CH_PASSIVE_ONLY ?
6281 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6282 scan_type);
6283 }
6284
6285 if (start != channel_index) {
6286 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6287 (channel_index - start);
6288 channel_index++;
6289 }
6290 }
6291
6292 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6293 int start = channel_index;
6294 if (priv->config & CFG_SPEED_SCAN) {
6295 int index;
6296 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6297 /* nop out the list */
6298 [0] = 0
6299 };
6300
6301 u8 channel;
6302 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6303 channel =
6304 priv->speed_scan[priv->speed_scan_pos];
6305 if (channel == 0) {
6306 priv->speed_scan_pos = 0;
6307 channel = priv->speed_scan[0];
6308 }
6309 if ((priv->status & STATUS_ASSOCIATED) &&
6310 channel == priv->channel) {
6311 priv->speed_scan_pos++;
6312 continue;
6313 }
6314
6315 /* If this channel has already been
6316 * added in scan, break from loop
6317 * and this will be the first channel
6318 * in the next scan.
6319 */
6320 if (channels[channel - 1] != 0)
6321 break;
6322
6323 channels[channel - 1] = 1;
6324 priv->speed_scan_pos++;
6325 channel_index++;
6326 scan->channels_list[channel_index] = channel;
6327 index =
6328 libipw_channel_to_index(priv->ieee, channel);
6329 ipw_set_scan_type(scan, channel_index,
6330 geo->bg[index].
6331 flags &
6332 LIBIPW_CH_PASSIVE_ONLY ?
6333 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6334 : scan_type);
6335 }
6336 } else {
6337 for (i = 0; i < geo->bg_channels; i++) {
6338 if ((priv->status & STATUS_ASSOCIATED) &&
6339 geo->bg[i].channel == priv->channel)
6340 continue;
6341 channel_index++;
6342 scan->channels_list[channel_index] =
6343 geo->bg[i].channel;
6344 ipw_set_scan_type(scan, channel_index,
6345 geo->bg[i].
6346 flags &
6347 LIBIPW_CH_PASSIVE_ONLY ?
6348 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6349 : scan_type);
6350 }
6351 }
6352
6353 if (start != channel_index) {
6354 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6355 (channel_index - start);
6356 }
6357 }
6358 }
6359
6360 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6361 {
6362 /* staying on passive channels longer than the DTIM interval during a
6363 * scan, while associated, causes the firmware to cancel the scan
6364 * without notification. Hence, don't stay on passive channels longer
6365 * than the beacon interval.
6366 */
6367 if (priv->status & STATUS_ASSOCIATED
6368 && priv->assoc_network->beacon_interval > 10)
6369 return priv->assoc_network->beacon_interval - 10;
6370 else
6371 return 120;
6372 }
6373
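/*
 * Build and send a scan request.  The request is deferred via the
 * SCAN_PENDING / DIRECT_SCAN_PENDING status bits if a scan is already
 * running, a scan abort is pending, or RF kill is active.  A 'direct'
 * scan probes for priv->direct_scan_ssid; in monitor mode a long
 * passive dwell on a single channel is used instead.
 */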
6374 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6375 {
6376 struct ipw_scan_request_ext scan;
6377 int err = 0, scan_type;
6378
6379 if (!(priv->status & STATUS_INIT) ||
6380 (priv->status & STATUS_EXIT_PENDING))
6381 return 0;
6382
6383 mutex_lock(&priv->mutex);
6384
6385 if (direct && (priv->direct_scan_ssid_len == 0)) {
6386 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6387 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6388 goto done;
6389 }
6390
6391 if (priv->status & STATUS_SCANNING) {
6392 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6393 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6394 STATUS_SCAN_PENDING;
6395 goto done;
6396 }
6397
6398 if (!(priv->status & STATUS_SCAN_FORCED) &&
6399 priv->status & STATUS_SCAN_ABORTING) {
6400 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6401 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6402 STATUS_SCAN_PENDING;
6403 goto done;
6404 }
6405
6406 if (priv->status & STATUS_RF_KILL_MASK) {
6407 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6408 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6409 STATUS_SCAN_PENDING;
6410 goto done;
6411 }
6412
6413 memset(&scan, 0, sizeof(scan));
6414 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6415
6416 if (type == IW_SCAN_TYPE_PASSIVE) {
6417 IPW_DEBUG_WX("use passive scanning\n");
6418 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6419 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6420 cpu_to_le16(ipw_passive_dwell_time(priv));
6421 ipw_add_scan_channels(priv, &scan, scan_type);
6422 goto send_request;
6423 }
6424
6425 /* Use active scan by default. */
6426 if (priv->config & CFG_SPEED_SCAN)
6427 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6428 cpu_to_le16(30);
6429 else
6430 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6431 cpu_to_le16(20);
6432
6433 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6434 cpu_to_le16(20);
6435
6436 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6437 cpu_to_le16(ipw_passive_dwell_time(priv));
6438 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6439
6440 #ifdef CONFIG_IPW2200_MONITOR
6441 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6442 u8 channel;
6443 u8 band = 0;
6444
6445 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6446 case LIBIPW_52GHZ_BAND:
6447 band = (u8) (IPW_A_MODE << 6) | 1;
6448 channel = priv->channel;
6449 break;
6450
6451 case LIBIPW_24GHZ_BAND:
6452 band = (u8) (IPW_B_MODE << 6) | 1;
6453 channel = priv->channel;
6454 break;
6455
6456 default:
6457 band = (u8) (IPW_B_MODE << 6) | 1;
6458 channel = 9;
6459 break;
6460 }
6461
6462 scan.channels_list[0] = band;
6463 scan.channels_list[1] = channel;
6464 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6465
6466 /* NOTE: The card will sit on this channel for this time
6467 * period. Scan aborts are timing sensitive and frequently
6468 * result in firmware restarts. As such, it is best to
6469 * set a small dwell_time here and just keep re-issuing
6470 * scans. Otherwise fast channel hopping will not actually
6471 * hop channels.
6472 *
6473 * TODO: Move SPEED SCAN support to all modes and bands */
6474 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6475 cpu_to_le16(2000);
6476 } else {
6477 #endif /* CONFIG_IPW2200_MONITOR */
6478 /* Honor direct scans first, otherwise if we are roaming make
6479 * this a direct scan for the current network. Finally,
6480 * ensure that every other scan is a fast channel hop scan */
6481 if (direct) {
6482 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6483 priv->direct_scan_ssid_len);
6484 if (err) {
6485 IPW_DEBUG_HC("Attempt to send SSID command "
6486 "failed\n");
6487 goto done;
6488 }
6489
6490 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6491 } else if ((priv->status & STATUS_ROAMING)
6492 || (!(priv->status & STATUS_ASSOCIATED)
6493 && (priv->config & CFG_STATIC_ESSID)
6494 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6495 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6496 if (err) {
6497 IPW_DEBUG_HC("Attempt to send SSID command "
6498 "failed.\n");
6499 goto done;
6500 }
6501
6502 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6503 } else
6504 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6505
6506 ipw_add_scan_channels(priv, &scan, scan_type);
6507 #ifdef CONFIG_IPW2200_MONITOR
6508 }
6509 #endif
6510
6511 send_request:
6512 err = ipw_send_scan_request_ext(priv, &scan);
6513 if (err) {
6514 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6515 goto done;
6516 }
6517
6518 priv->status |= STATUS_SCANNING;
6519 if (direct) {
6520 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6521 priv->direct_scan_ssid_len = 0;
6522 } else
6523 priv->status &= ~STATUS_SCAN_PENDING;
6524
6525 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6526 done:
6527 mutex_unlock(&priv->mutex);
6528 return err;
6529 }
6530
6531 static void ipw_request_passive_scan(struct work_struct *work)
6532 {
6533 struct ipw_priv *priv =
6534 container_of(work, struct ipw_priv, request_passive_scan.work);
6535 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6536 }
6537
6538 static void ipw_request_scan(struct work_struct *work)
6539 {
6540 struct ipw_priv *priv =
6541 container_of(work, struct ipw_priv, request_scan.work);
6542 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6543 }
6544
6545 static void ipw_request_direct_scan(struct work_struct *work)
6546 {
6547 struct ipw_priv *priv =
6548 container_of(work, struct ipw_priv, request_direct_scan.work);
6549 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6550 }
6551
6552 static void ipw_bg_abort_scan(struct work_struct *work)
6553 {
6554 struct ipw_priv *priv =
6555 container_of(work, struct ipw_priv, abort_scan);
6556 mutex_lock(&priv->mutex);
6557 ipw_abort_scan(priv);
6558 mutex_unlock(&priv->mutex);
6559 }
6560
6561 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6562 {
6563 /* This is called when wpa_supplicant loads and closes the driver
6564 * interface. */
6565 priv->ieee->wpa_enabled = value;
6566 return 0;
6567 }
6568
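/*
 * Translate the WEXT IW_AUTH_ALG_* selection into a libipw auth mode
 * (shared key, open system or LEAP) and apply it through the ieee
 * set_security() callback.
 */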
6569 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6570 {
6571 struct libipw_device *ieee = priv->ieee;
6572 struct libipw_security sec = {
6573 .flags = SEC_AUTH_MODE,
6574 };
6575 int ret = 0;
6576
6577 if (value & IW_AUTH_ALG_SHARED_KEY) {
6578 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6579 ieee->open_wep = 0;
6580 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6581 sec.auth_mode = WLAN_AUTH_OPEN;
6582 ieee->open_wep = 1;
6583 } else if (value & IW_AUTH_ALG_LEAP) {
6584 sec.auth_mode = WLAN_AUTH_LEAP;
6585 ieee->open_wep = 1;
6586 } else
6587 return -EINVAL;
6588
6589 if (ieee->set_security)
6590 ieee->set_security(ieee->dev, &sec);
6591 else
6592 ret = -EOPNOTSUPP;
6593
6594 return ret;
6595 }
6596
6597 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6598 int wpa_ie_len)
6599 {
6600 /* make sure WPA is enabled */
6601 ipw_wpa_enable(priv, 1);
6602 }
6603
6604 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6605 char *capabilities, int length)
6606 {
6607 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6608
6609 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6610 capabilities);
6611 }
6612
6613 /*
6614 * WE-18 support
6615 */
6616
6617 /* SIOCSIWGENIE */
6618 static int ipw_wx_set_genie(struct net_device *dev,
6619 struct iw_request_info *info,
6620 union iwreq_data *wrqu, char *extra)
6621 {
6622 struct ipw_priv *priv = libipw_priv(dev);
6623 struct libipw_device *ieee = priv->ieee;
6624 u8 *buf;
6625 int err = 0;
6626
6627 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6628 (wrqu->data.length && extra == NULL))
6629 return -EINVAL;
6630
6631 if (wrqu->data.length) {
6632 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6633 if (buf == NULL) {
6634 err = -ENOMEM;
6635 goto out;
6636 }
6637
6638 kfree(ieee->wpa_ie);
6639 ieee->wpa_ie = buf;
6640 ieee->wpa_ie_len = wrqu->data.length;
6641 } else {
6642 kfree(ieee->wpa_ie);
6643 ieee->wpa_ie = NULL;
6644 ieee->wpa_ie_len = 0;
6645 }
6646
6647 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6648 out:
6649 return err;
6650 }
6651
6652 /* SIOCGIWGENIE */
6653 static int ipw_wx_get_genie(struct net_device *dev,
6654 struct iw_request_info *info,
6655 union iwreq_data *wrqu, char *extra)
6656 {
6657 struct ipw_priv *priv = libipw_priv(dev);
6658 struct libipw_device *ieee = priv->ieee;
6659 int err = 0;
6660
6661 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6662 wrqu->data.length = 0;
6663 goto out;
6664 }
6665
6666 if (wrqu->data.length < ieee->wpa_ie_len) {
6667 err = -E2BIG;
6668 goto out;
6669 }
6670
6671 wrqu->data.length = ieee->wpa_ie_len;
6672 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6673
6674 out:
6675 return err;
6676 }
6677
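/* Map a WEXT cipher selection to the driver's internal security level:
 * none = 0, WEP40/104 = 1, TKIP = 2, CCMP = 3; unknown ciphers
 * return -1. */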
6678 static int wext_cipher2level(int cipher)
6679 {
6680 switch (cipher) {
6681 case IW_AUTH_CIPHER_NONE:
6682 return SEC_LEVEL_0;
6683 case IW_AUTH_CIPHER_WEP40:
6684 case IW_AUTH_CIPHER_WEP104:
6685 return SEC_LEVEL_1;
6686 case IW_AUTH_CIPHER_TKIP:
6687 return SEC_LEVEL_2;
6688 case IW_AUTH_CIPHER_CCMP:
6689 return SEC_LEVEL_3;
6690 default:
6691 return -1;
6692 }
6693 }
6694
6695 /* SIOCSIWAUTH */
6696 static int ipw_wx_set_auth(struct net_device *dev,
6697 struct iw_request_info *info,
6698 union iwreq_data *wrqu, char *extra)
6699 {
6700 struct ipw_priv *priv = libipw_priv(dev);
6701 struct libipw_device *ieee = priv->ieee;
6702 struct iw_param *param = &wrqu->param;
6703 struct lib80211_crypt_data *crypt;
6704 unsigned long flags;
6705 int ret = 0;
6706
6707 switch (param->flags & IW_AUTH_INDEX) {
6708 case IW_AUTH_WPA_VERSION:
6709 break;
6710 case IW_AUTH_CIPHER_PAIRWISE:
6711 ipw_set_hw_decrypt_unicast(priv,
6712 wext_cipher2level(param->value));
6713 break;
6714 case IW_AUTH_CIPHER_GROUP:
6715 ipw_set_hw_decrypt_multicast(priv,
6716 wext_cipher2level(param->value));
6717 break;
6718 case IW_AUTH_KEY_MGMT:
6719 /*
6720 * ipw2200 does not use these parameters
6721 */
6722 break;
6723
6724 case IW_AUTH_TKIP_COUNTERMEASURES:
6725 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6726 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6727 break;
6728
6729 flags = crypt->ops->get_flags(crypt->priv);
6730
6731 if (param->value)
6732 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6733 else
6734 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6735
6736 crypt->ops->set_flags(flags, crypt->priv);
6737
6738 break;
6739
6740 case IW_AUTH_DROP_UNENCRYPTED:{
6741 /* HACK:
6742 *
6743 * wpa_supplicant calls set_wpa_enabled when the driver
6744 * is loaded and unloaded, regardless of whether WPA is being
6745 * used. No other calls are made which can be used to
6746 * determine whether encryption will be used prior to the
6747 * association being expected. If encryption is not being
6748 * used, drop_unencrypted is set to false, else true -- we
6749 * can use this to determine if the CAP_PRIVACY_ON bit should
6750 * be set.
6751 */
6752 struct libipw_security sec = {
6753 .flags = SEC_ENABLED,
6754 .enabled = param->value,
6755 };
6756 priv->ieee->drop_unencrypted = param->value;
6757 /* We only change SEC_LEVEL for open mode. Others
6758 * are set by ipw_wpa_set_encryption.
6759 */
6760 if (!param->value) {
6761 sec.flags |= SEC_LEVEL;
6762 sec.level = SEC_LEVEL_0;
6763 } else {
6764 sec.flags |= SEC_LEVEL;
6765 sec.level = SEC_LEVEL_1;
6766 }
6767 if (priv->ieee->set_security)
6768 priv->ieee->set_security(priv->ieee->dev, &sec);
6769 break;
6770 }
6771
6772 case IW_AUTH_80211_AUTH_ALG:
6773 ret = ipw_wpa_set_auth_algs(priv, param->value);
6774 break;
6775
6776 case IW_AUTH_WPA_ENABLED:
6777 ret = ipw_wpa_enable(priv, param->value);
6778 ipw_disassociate(priv);
6779 break;
6780
6781 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6782 ieee->ieee802_1x = param->value;
6783 break;
6784
6785 case IW_AUTH_PRIVACY_INVOKED:
6786 ieee->privacy_invoked = param->value;
6787 break;
6788
6789 default:
6790 return -EOPNOTSUPP;
6791 }
6792 return ret;
6793 }
6794
6795 /* SIOCGIWAUTH */
6796 static int ipw_wx_get_auth(struct net_device *dev,
6797 struct iw_request_info *info,
6798 union iwreq_data *wrqu, char *extra)
6799 {
6800 struct ipw_priv *priv = libipw_priv(dev);
6801 struct libipw_device *ieee = priv->ieee;
6802 struct lib80211_crypt_data *crypt;
6803 struct iw_param *param = &wrqu->param;
6804 int ret = 0;
6805
6806 switch (param->flags & IW_AUTH_INDEX) {
6807 case IW_AUTH_WPA_VERSION:
6808 case IW_AUTH_CIPHER_PAIRWISE:
6809 case IW_AUTH_CIPHER_GROUP:
6810 case IW_AUTH_KEY_MGMT:
6811 /*
6812 * wpa_supplicant will control these internally
6813 */
6814 ret = -EOPNOTSUPP;
6815 break;
6816
6817 case IW_AUTH_TKIP_COUNTERMEASURES:
6818 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6819 if (!crypt || !crypt->ops->get_flags)
6820 break;
6821
6822 param->value = (crypt->ops->get_flags(crypt->priv) &
6823 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6824
6825 break;
6826
6827 case IW_AUTH_DROP_UNENCRYPTED:
6828 param->value = ieee->drop_unencrypted;
6829 break;
6830
6831 case IW_AUTH_80211_AUTH_ALG:
6832 param->value = ieee->sec.auth_mode;
6833 break;
6834
6835 case IW_AUTH_WPA_ENABLED:
6836 param->value = ieee->wpa_enabled;
6837 break;
6838
6839 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6840 param->value = ieee->ieee802_1x;
6841 break;
6842
6843 case IW_AUTH_ROAMING_CONTROL:
6844 case IW_AUTH_PRIVACY_INVOKED:
6845 param->value = ieee->privacy_invoked;
6846 break;
6847
6848 default:
6849 return -EOPNOTSUPP;
6850 }
6851 return 0;
6852 }
6853
6854 /* SIOCSIWENCODEEXT */
6855 static int ipw_wx_set_encodeext(struct net_device *dev,
6856 struct iw_request_info *info,
6857 union iwreq_data *wrqu, char *extra)
6858 {
6859 struct ipw_priv *priv = libipw_priv(dev);
6860 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6861
6862 if (hwcrypto) {
6863 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6864 /* IPW HW can't build TKIP MIC,
6865 host decryption still needed */
6866 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6867 priv->ieee->host_mc_decrypt = 1;
6868 else {
6869 priv->ieee->host_encrypt = 0;
6870 priv->ieee->host_encrypt_msdu = 1;
6871 priv->ieee->host_decrypt = 1;
6872 }
6873 } else {
6874 priv->ieee->host_encrypt = 0;
6875 priv->ieee->host_encrypt_msdu = 0;
6876 priv->ieee->host_decrypt = 0;
6877 priv->ieee->host_mc_decrypt = 0;
6878 }
6879 }
6880
6881 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6882 }
6883
6884 /* SIOCGIWENCODEEXT */
6885 static int ipw_wx_get_encodeext(struct net_device *dev,
6886 struct iw_request_info *info,
6887 union iwreq_data *wrqu, char *extra)
6888 {
6889 struct ipw_priv *priv = libipw_priv(dev);
6890 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6891 }
6892
6893 /* SIOCSIWMLME */
6894 static int ipw_wx_set_mlme(struct net_device *dev,
6895 struct iw_request_info *info,
6896 union iwreq_data *wrqu, char *extra)
6897 {
6898 struct ipw_priv *priv = libipw_priv(dev);
6899 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6900 __le16 reason;
6901
6902 reason = cpu_to_le16(mlme->reason_code);
6903
6904 switch (mlme->cmd) {
6905 case IW_MLME_DEAUTH:
6906 /* silently ignore */
6907 break;
6908
6909 case IW_MLME_DISASSOC:
6910 ipw_disassociate(priv);
6911 break;
6912
6913 default:
6914 return -EOPNOTSUPP;
6915 }
6916 return 0;
6917 }
6918
6919 #ifdef CONFIG_IPW2200_QOS
6920
6921 /* QoS */
6922 /*
6923 * get the modulation type of the current network or,
6924 * if not associated, the card's current mode
6925 */
6926 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6927 {
6928 u8 mode = 0;
6929
6930 if (priv->status & STATUS_ASSOCIATED) {
6931 unsigned long flags;
6932
6933 spin_lock_irqsave(&priv->ieee->lock, flags);
6934 mode = priv->assoc_network->mode;
6935 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6936 } else {
6937 mode = priv->ieee->mode;
6938 }
6939 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6940 return mode;
6941 }
6942
6943 /*
6944 * Handle beacon and probe response management frames
6945 */
6946 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6947 int active_network,
6948 struct libipw_network *network)
6949 {
6950 u32 size = sizeof(struct libipw_qos_parameters);
6951
6952 if (network->capability & WLAN_CAPABILITY_IBSS)
6953 network->qos_data.active = network->qos_data.supported;
6954
6955 if (network->flags & NETWORK_HAS_QOS_MASK) {
6956 if (active_network &&
6957 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6958 network->qos_data.active = network->qos_data.supported;
6959
6960 if ((network->qos_data.active == 1) && (active_network == 1) &&
6961 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6962 (network->qos_data.old_param_count !=
6963 network->qos_data.param_count)) {
6964 network->qos_data.old_param_count =
6965 network->qos_data.param_count;
6966 schedule_work(&priv->qos_activate);
6967 IPW_DEBUG_QOS("QoS parameters change call "
6968 "qos_activate\n");
6969 }
6970 } else {
6971 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6972 memcpy(&network->qos_data.parameters,
6973 &def_parameters_CCK, size);
6974 else
6975 memcpy(&network->qos_data.parameters,
6976 &def_parameters_OFDM, size);
6977
6978 if ((network->qos_data.active == 1) && (active_network == 1)) {
6979 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6980 schedule_work(&priv->qos_activate);
6981 }
6982
6983 network->qos_data.active = 0;
6984 network->qos_data.supported = 0;
6985 }
6986 if ((priv->status & STATUS_ASSOCIATED) &&
6987 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6988 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6989 if (network->capability & WLAN_CAPABILITY_IBSS)
6990 if ((network->ssid_len ==
6991 priv->assoc_network->ssid_len) &&
6992 !memcmp(network->ssid,
6993 priv->assoc_network->ssid,
6994 network->ssid_len)) {
6995 schedule_work(&priv->merge_networks);
6996 }
6997 }
6998
6999 return 0;
7000 }
7001
7002 /*
7003 * This function sets up the firmware to support QoS. It sends
7004 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
7005 */
7006 static int ipw_qos_activate(struct ipw_priv *priv,
7007 struct libipw_qos_data *qos_network_data)
7008 {
7009 int err;
7010 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7011 struct libipw_qos_parameters *active_one = NULL;
7012 u32 size = sizeof(struct libipw_qos_parameters);
7013 u32 burst_duration;
7014 int i;
7015 u8 type;
7016
7017 type = ipw_qos_current_mode(priv);
7018
7019 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7020 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7021 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7022 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7023
7024 if (qos_network_data == NULL) {
7025 if (type == IEEE_B) {
7026 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7027 active_one = &def_parameters_CCK;
7028 } else
7029 active_one = &def_parameters_OFDM;
7030
7031 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7032 burst_duration = ipw_qos_get_burst_duration(priv);
7033 for (i = 0; i < QOS_QUEUE_NUM; i++)
7034 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7035 cpu_to_le16(burst_duration);
7036 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7037 if (type == IEEE_B) {
7038 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7039 type);
7040 if (priv->qos_data.qos_enable == 0)
7041 active_one = &def_parameters_CCK;
7042 else
7043 active_one = priv->qos_data.def_qos_parm_CCK;
7044 } else {
7045 if (priv->qos_data.qos_enable == 0)
7046 active_one = &def_parameters_OFDM;
7047 else
7048 active_one = priv->qos_data.def_qos_parm_OFDM;
7049 }
7050 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7051 } else {
7052 unsigned long flags;
7053 int active;
7054
7055 spin_lock_irqsave(&priv->ieee->lock, flags);
7056 active_one = &(qos_network_data->parameters);
7057 qos_network_data->old_param_count =
7058 qos_network_data->param_count;
7059 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7060 active = qos_network_data->supported;
7061 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7062
7063 if (active == 0) {
7064 burst_duration = ipw_qos_get_burst_duration(priv);
7065 for (i = 0; i < QOS_QUEUE_NUM; i++)
7066 qos_parameters[QOS_PARAM_SET_ACTIVE].
7067 tx_op_limit[i] = cpu_to_le16(burst_duration);
7068 }
7069 }
7070
7071 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7072 err = ipw_send_qos_params_command(priv,
7073 (struct libipw_qos_parameters *)
7074 &(qos_parameters[0]));
7075 if (err)
7076 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7077
7078 return err;
7079 }
7080
7081 /*
7082 * send IPW_CMD_WME_INFO to the firmware
7083 */
7084 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7085 {
7086 int ret = 0;
7087 struct libipw_qos_information_element qos_info;
7088
7089 if (priv == NULL)
7090 return -1;
7091
7092 qos_info.elementID = QOS_ELEMENT_ID;
7093 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7094
7095 qos_info.version = QOS_VERSION_1;
7096 qos_info.ac_info = 0;
7097
7098 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7099 qos_info.qui_type = QOS_OUI_TYPE;
7100 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7101
7102 ret = ipw_send_qos_info_command(priv, &qos_info);
7103 if (ret != 0) {
7104 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7105 }
7106 return ret;
7107 }
7108
7109 /*
7110 * Set up the QoS parameters for the association request
7111 */
7112 static int ipw_qos_association(struct ipw_priv *priv,
7113 struct libipw_network *network)
7114 {
7115 int err = 0;
7116 struct libipw_qos_data *qos_data = NULL;
7117 struct libipw_qos_data ibss_data = {
7118 .supported = 1,
7119 .active = 1,
7120 };
7121
7122 switch (priv->ieee->iw_mode) {
7123 case IW_MODE_ADHOC:
7124 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7125
7126 qos_data = &ibss_data;
7127 break;
7128
7129 case IW_MODE_INFRA:
7130 qos_data = &network->qos_data;
7131 break;
7132
7133 default:
7134 BUG();
7135 break;
7136 }
7137
7138 err = ipw_qos_activate(priv, qos_data);
7139 if (err) {
7140 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7141 return err;
7142 }
7143
7144 if (priv->qos_data.qos_enable && qos_data->supported) {
7145 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7146 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7147 return ipw_qos_set_info_element(priv);
7148 }
7149
7150 return 0;
7151 }
7152
7153 /*
7154 * Handle the beacon responses. If the QoS settings advertised by
7155 * the network differ from the settings of the current association,
7156 * adjust the QoS settings.
7157 */
7158 static int ipw_qos_association_resp(struct ipw_priv *priv,
7159 struct libipw_network *network)
7160 {
7161 int ret = 0;
7162 unsigned long flags;
7163 u32 size = sizeof(struct libipw_qos_parameters);
7164 int set_qos_param = 0;
7165
7166 if ((priv == NULL) || (network == NULL) ||
7167 (priv->assoc_network == NULL))
7168 return ret;
7169
7170 if (!(priv->status & STATUS_ASSOCIATED))
7171 return ret;
7172
7173 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7174 return ret;
7175
7176 spin_lock_irqsave(&priv->ieee->lock, flags);
7177 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7178 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7179 sizeof(struct libipw_qos_data));
7180 priv->assoc_network->qos_data.active = 1;
7181 if ((network->qos_data.old_param_count !=
7182 network->qos_data.param_count)) {
7183 set_qos_param = 1;
7184 network->qos_data.old_param_count =
7185 network->qos_data.param_count;
7186 }
7187
7188 } else {
7189 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7190 memcpy(&priv->assoc_network->qos_data.parameters,
7191 &def_parameters_CCK, size);
7192 else
7193 memcpy(&priv->assoc_network->qos_data.parameters,
7194 &def_parameters_OFDM, size);
7195 priv->assoc_network->qos_data.active = 0;
7196 priv->assoc_network->qos_data.supported = 0;
7197 set_qos_param = 1;
7198 }
7199
7200 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7201
7202 if (set_qos_param == 1)
7203 schedule_work(&priv->qos_activate);
7204
7205 return ret;
7206 }
7207
7208 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7209 {
7210 u32 ret = 0;
7211
7212 if ((priv == NULL))
7213 return 0;
7214
7215 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7216 ret = priv->qos_data.burst_duration_CCK;
7217 else
7218 ret = priv->qos_data.burst_duration_OFDM;
7219
7220 return ret;
7221 }
7222
7223 /*
7224 * Initialize the global QoS settings
7225 */
7226 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7227 int burst_enable, u32 burst_duration_CCK,
7228 u32 burst_duration_OFDM)
7229 {
7230 priv->qos_data.qos_enable = enable;
7231
7232 if (priv->qos_data.qos_enable) {
7233 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7234 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7235 IPW_DEBUG_QOS("QoS is enabled\n");
7236 } else {
7237 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7238 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7239 IPW_DEBUG_QOS("QoS is not enabled\n");
7240 }
7241
7242 priv->qos_data.burst_enable = burst_enable;
7243
7244 if (burst_enable) {
7245 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7246 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7247 } else {
7248 priv->qos_data.burst_duration_CCK = 0;
7249 priv->qos_data.burst_duration_OFDM = 0;
7250 }
7251 }
7252
7253 /*
7254 * map the packet priority to the right TX Queue
7255 */
7256 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7257 {
7258 if (priority > 7 || !priv->qos_data.qos_enable)
7259 priority = 0;
7260
7261 return from_priority_to_tx_queue[priority] - 1;
7262 }
7263
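/*
 * Return 1 if QoS should be applied to this frame: we must be
 * associated, QoS must be enabled locally and active on the associated
 * network, and in IBSS mode QoS is only considered active for unicast
 * destinations.
 */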
7264 static int ipw_is_qos_active(struct net_device *dev,
7265 struct sk_buff *skb)
7266 {
7267 struct ipw_priv *priv = libipw_priv(dev);
7268 struct libipw_qos_data *qos_data = NULL;
7269 int active, supported;
7270 u8 *daddr = skb->data + ETH_ALEN;
7271 int unicast = !is_multicast_ether_addr(daddr);
7272
7273 if (!(priv->status & STATUS_ASSOCIATED))
7274 return 0;
7275
7276 qos_data = &priv->assoc_network->qos_data;
7277
7278 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7279 if (unicast == 0)
7280 qos_data->active = 0;
7281 else
7282 qos_data->active = qos_data->supported;
7283 }
7284 active = qos_data->active;
7285 supported = qos_data->supported;
7286 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7287 "unicast %d\n",
7288 priv->qos_data.qos_enable, active, supported, unicast);
7289 if (active && priv->qos_data.qos_enable)
7290 return 1;
7291
7292 return 0;
7293 }
7294
7295 /*
7296 * add QoS parameter to the TX command
7297 */
7298 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7299 u16 priority,
7300 struct tfd_data *tfd)
7301 {
7302 int tx_queue_id = 0;
7303
7304
7305 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7306 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7307
7308 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7309 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7310 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7311 }
7312 return 0;
7313 }
7314
7315 /*
7316 * background support to run QoS activate functionality
7317 */
7318 static void ipw_bg_qos_activate(struct work_struct *work)
7319 {
7320 struct ipw_priv *priv =
7321 container_of(work, struct ipw_priv, qos_activate);
7322
7323 mutex_lock(&priv->mutex);
7324
7325 if (priv->status & STATUS_ASSOCIATED)
7326 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7327
7328 mutex_unlock(&priv->mutex);
7329 }
7330
7331 static int ipw_handle_probe_response(struct net_device *dev,
7332 struct libipw_probe_response *resp,
7333 struct libipw_network *network)
7334 {
7335 struct ipw_priv *priv = libipw_priv(dev);
7336 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7337 (network == priv->assoc_network));
7338
7339 ipw_qos_handle_probe_response(priv, active_network, network);
7340
7341 return 0;
7342 }
7343
7344 static int ipw_handle_beacon(struct net_device *dev,
7345 struct libipw_beacon *resp,
7346 struct libipw_network *network)
7347 {
7348 struct ipw_priv *priv = libipw_priv(dev);
7349 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7350 (network == priv->assoc_network));
7351
7352 ipw_qos_handle_probe_response(priv, active_network, network);
7353
7354 return 0;
7355 }
7356
7357 static int ipw_handle_assoc_response(struct net_device *dev,
7358 struct libipw_assoc_response *resp,
7359 struct libipw_network *network)
7360 {
7361 struct ipw_priv *priv = libipw_priv(dev);
7362 ipw_qos_association_resp(priv, network);
7363 return 0;
7364 }
7365
7366 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7367 *qos_param)
7368 {
7369 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7370 sizeof(*qos_param) * 3, qos_param);
7371 }
7372
7373 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7374 *qos_param)
7375 {
7376 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7377 qos_param);
7378 }
7379
7380 #endif /* CONFIG_IPW2200_QOS */
7381
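/*
 * Build the association request for the selected network -- channel,
 * auth type (shared key / LEAP / open), preamble, capabilities, TSF
 * and BSSID -- push the SSID, supported rates and system config to the
 * firmware, and finally send the HC_ASSOCIATE / HC_REASSOCIATE (or
 * HC_IBSS_START when creating a new IBSS) command.
 */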
7382 static int ipw_associate_network(struct ipw_priv *priv,
7383 struct libipw_network *network,
7384 struct ipw_supported_rates *rates, int roaming)
7385 {
7386 int err;
7387 DECLARE_SSID_BUF(ssid);
7388
7389 if (priv->config & CFG_FIXED_RATE)
7390 ipw_set_fixed_rate(priv, network->mode);
7391
7392 if (!(priv->config & CFG_STATIC_ESSID)) {
7393 priv->essid_len = min(network->ssid_len,
7394 (u8) IW_ESSID_MAX_SIZE);
7395 memcpy(priv->essid, network->ssid, priv->essid_len);
7396 }
7397
7398 network->last_associate = jiffies;
7399
7400 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7401 priv->assoc_request.channel = network->channel;
7402 priv->assoc_request.auth_key = 0;
7403
7404 if ((priv->capability & CAP_PRIVACY_ON) &&
7405 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7406 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7407 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7408
7409 if (priv->ieee->sec.level == SEC_LEVEL_1)
7410 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7411
7412 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7413 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7414 priv->assoc_request.auth_type = AUTH_LEAP;
7415 else
7416 priv->assoc_request.auth_type = AUTH_OPEN;
7417
7418 if (priv->ieee->wpa_ie_len) {
7419 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7420 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7421 priv->ieee->wpa_ie_len);
7422 }
7423
7424 /*
7425 * It is valid for our ieee device to support multiple modes, but
7426 * when it comes to associating to a given network we have to choose
7427 * just one mode.
7428 */
7429 if (network->mode & priv->ieee->mode & IEEE_A)
7430 priv->assoc_request.ieee_mode = IPW_A_MODE;
7431 else if (network->mode & priv->ieee->mode & IEEE_G)
7432 priv->assoc_request.ieee_mode = IPW_G_MODE;
7433 else if (network->mode & priv->ieee->mode & IEEE_B)
7434 priv->assoc_request.ieee_mode = IPW_B_MODE;
7435
7436 priv->assoc_request.capability = cpu_to_le16(network->capability);
7437 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7438 && !(priv->config & CFG_PREAMBLE_LONG)) {
7439 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7440 } else {
7441 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7442
7443 /* Clear the short preamble if we won't be supporting it */
7444 priv->assoc_request.capability &=
7445 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7446 }
7447
7448 /* Clear capability bits that aren't used in Ad Hoc */
7449 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7450 priv->assoc_request.capability &=
7451 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7452
7453 IPW_DEBUG_ASSOC("%ssociation attempt: '%s', channel %d, "
7454 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7455 roaming ? "Rea" : "A",
7456 print_ssid(ssid, priv->essid, priv->essid_len),
7457 network->channel,
7458 ipw_modes[priv->assoc_request.ieee_mode],
7459 rates->num_rates,
7460 (priv->assoc_request.preamble_length ==
7461 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7462 network->capability &
7463 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7464 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7465 priv->capability & CAP_PRIVACY_ON ?
7466 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7467 "(open)") : "",
7468 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7469 priv->capability & CAP_PRIVACY_ON ?
7470 '1' + priv->ieee->sec.active_key : '.',
7471 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7472
7473 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7474 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7475 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7476 priv->assoc_request.assoc_type = HC_IBSS_START;
7477 priv->assoc_request.assoc_tsf_msw = 0;
7478 priv->assoc_request.assoc_tsf_lsw = 0;
7479 } else {
7480 if (unlikely(roaming))
7481 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7482 else
7483 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7484 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7485 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7486 }
7487
7488 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7489
7490 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7491 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7492 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7493 } else {
7494 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7495 priv->assoc_request.atim_window = 0;
7496 }
7497
7498 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7499
7500 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7501 if (err) {
7502 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7503 return err;
7504 }
7505
7506 rates->ieee_mode = priv->assoc_request.ieee_mode;
7507 rates->purpose = IPW_RATE_CONNECT;
7508 ipw_send_supported_rates(priv, rates);
7509
7510 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7511 priv->sys_config.dot11g_auto_detection = 1;
7512 else
7513 priv->sys_config.dot11g_auto_detection = 0;
7514
7515 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7516 priv->sys_config.answer_broadcast_ssid_probe = 1;
7517 else
7518 priv->sys_config.answer_broadcast_ssid_probe = 0;
7519
7520 err = ipw_send_system_config(priv);
7521 if (err) {
7522 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7523 return err;
7524 }
7525
7526 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7527 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7528 if (err) {
7529 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7530 return err;
7531 }
7532
7533 /*
7534 * If preemption is enabled, it is possible for the association
7535 * to complete before we return from ipw_send_associate. Therefore
7536 * we have to be sure to update our private data first.
7537 */
7538 priv->channel = network->channel;
7539 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7540 priv->status |= STATUS_ASSOCIATING;
7541 priv->status &= ~STATUS_SECURITY_UPDATED;
7542
7543 priv->assoc_network = network;
7544
7545 #ifdef CONFIG_IPW2200_QOS
7546 ipw_qos_association(priv, network);
7547 #endif
7548
7549 err = ipw_send_associate(priv, &priv->assoc_request);
7550 if (err) {
7551 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7552 return err;
7553 }
7554
7555 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7556 print_ssid(ssid, priv->essid, priv->essid_len),
7557 priv->bssid);
7558
7559 return 0;
7560 }
7561
7562 static void ipw_roam(void *data)
7563 {
7564 struct ipw_priv *priv = data;
7565 struct libipw_network *network = NULL;
7566 struct ipw_network_match match = {
7567 .network = priv->assoc_network
7568 };
7569
7570 /* The roaming process is as follows:
7571 *
7572 * 1. Missed beacon threshold triggers the roaming process by
7573 * setting the status ROAM bit and requesting a scan.
7574 * 2. When the scan completes, it schedules the ROAM work
7575 * 3. The ROAM work looks at all of the known networks for one that
7576 * is a better network than the currently associated. If none
7577 * found, the ROAM process is over (ROAM bit cleared)
7578 * 4. If a better network is found, a disassociation request is
7579 * sent.
7580 * 5. When the disassociation completes, the roam work is again
7581 * scheduled. The second time through, the driver is no longer
7582 * associated, and the newly selected network is sent an
7583 * association request.
7584 * 6. At this point, the roaming process is complete and the ROAM
7585 * status bit is cleared.
7586 */
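/* Rough mapping of the two passes below onto the status bits (descriptive
 * sketch only, mirroring the code that follows):
 *
 *   STATUS_ASSOCIATED set   -> pass 1: pick match.network, send a
 *                              disassociation request and return
 *   STATUS_ASSOCIATED clear -> pass 2: associate to the previously selected
 *                              network and clear STATUS_ROAMING
 */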
7587
7588 /* If we are no longer associated, and the roaming bit is no longer
7589 * set, then we are not actively roaming, so just return */
7590 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7591 return;
7592
7593 if (priv->status & STATUS_ASSOCIATED) {
7594 /* First pass through ROAM process -- look for a better
7595 * network */
7596 unsigned long flags;
7597 u8 rssi = priv->assoc_network->stats.rssi;
7598 priv->assoc_network->stats.rssi = -128;
7599 spin_lock_irqsave(&priv->ieee->lock, flags);
7600 list_for_each_entry(network, &priv->ieee->network_list, list) {
7601 if (network != priv->assoc_network)
7602 ipw_best_network(priv, &match, network, 1);
7603 }
7604 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7605 priv->assoc_network->stats.rssi = rssi;
7606
7607 if (match.network == priv->assoc_network) {
7608 IPW_DEBUG_ASSOC("No better APs in this network to "
7609 "roam to.\n");
7610 priv->status &= ~STATUS_ROAMING;
7611 ipw_debug_config(priv);
7612 return;
7613 }
7614
7615 ipw_send_disassociate(priv, 1);
7616 priv->assoc_network = match.network;
7617
7618 return;
7619 }
7620
7621 /* Second pass through ROAM process -- request association */
7622 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7623 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7624 priv->status &= ~STATUS_ROAMING;
7625 }
7626
7627 static void ipw_bg_roam(struct work_struct *work)
7628 {
7629 struct ipw_priv *priv =
7630 container_of(work, struct ipw_priv, roam);
7631 mutex_lock(&priv->mutex);
7632 ipw_roam(priv);
7633 mutex_unlock(&priv->mutex);
7634 }
7635
7636 static int ipw_associate(void *data)
7637 {
7638 struct ipw_priv *priv = data;
7639
7640 struct libipw_network *network = NULL;
7641 struct ipw_network_match match = {
7642 .network = NULL
7643 };
7644 struct ipw_supported_rates *rates;
7645 struct list_head *element;
7646 unsigned long flags;
7647 DECLARE_SSID_BUF(ssid);
7648
7649 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7650 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7651 return 0;
7652 }
7653
7654 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7655 IPW_DEBUG_ASSOC("Not attempting association (already in "
7656 "progress)\n");
7657 return 0;
7658 }
7659
7660 if (priv->status & STATUS_DISASSOCIATING) {
7661 IPW_DEBUG_ASSOC("Not attempting association (still "
7662 "disassociating)\n");
7663 schedule_work(&priv->associate);
7664 return 0;
7665 }
7666
7667 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7668 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7669 "initialized)\n");
7670 return 0;
7671 }
7672
7673 if (!(priv->config & CFG_ASSOCIATE) &&
7674 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7675 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7676 return 0;
7677 }
7678
7679 /* Protect our use of the network_list */
7680 spin_lock_irqsave(&priv->ieee->lock, flags);
7681 list_for_each_entry(network, &priv->ieee->network_list, list)
7682 ipw_best_network(priv, &match, network, 0);
7683
7684 network = match.network;
7685 rates = &match.rates;
7686
7687 if (network == NULL &&
7688 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7689 priv->config & CFG_ADHOC_CREATE &&
7690 priv->config & CFG_STATIC_ESSID &&
7691 priv->config & CFG_STATIC_CHANNEL) {
7692 /* Use oldest network if the free list is empty */
7693 if (list_empty(&priv->ieee->network_free_list)) {
7694 struct libipw_network *oldest = NULL;
7695 struct libipw_network *target;
7696
7697 list_for_each_entry(target, &priv->ieee->network_list, list) {
7698 if ((oldest == NULL) ||
7699 (target->last_scanned < oldest->last_scanned))
7700 oldest = target;
7701 }
7702
7703 /* If there are no more slots, expire the oldest */
7704 list_del(&oldest->list);
7705 target = oldest;
7706 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7707 "network list.\n",
7708 print_ssid(ssid, target->ssid,
7709 target->ssid_len),
7710 target->bssid);
7711 list_add_tail(&target->list,
7712 &priv->ieee->network_free_list);
7713 }
7714
7715 element = priv->ieee->network_free_list.next;
7716 network = list_entry(element, struct libipw_network, list);
7717 ipw_adhoc_create(priv, network);
7718 rates = &priv->rates;
7719 list_del(element);
7720 list_add_tail(&network->list, &priv->ieee->network_list);
7721 }
7722 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7723
7724 /* If we reached the end of the list, then we don't have any valid
7725 * matching APs */
7726 if (!network) {
7727 ipw_debug_config(priv);
7728
7729 if (!(priv->status & STATUS_SCANNING)) {
7730 if (!(priv->config & CFG_SPEED_SCAN))
7731 schedule_delayed_work(&priv->request_scan,
7732 SCAN_INTERVAL);
7733 else
7734 schedule_delayed_work(&priv->request_scan, 0);
7735 }
7736
7737 return 0;
7738 }
7739
7740 ipw_associate_network(priv, network, rates, 0);
7741
7742 return 1;
7743 }
7744
7745 static void ipw_bg_associate(struct work_struct *work)
7746 {
7747 struct ipw_priv *priv =
7748 container_of(work, struct ipw_priv, associate);
7749 mutex_lock(&priv->mutex);
7750 ipw_associate(priv);
7751 mutex_unlock(&priv->mutex);
7752 }
7753
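/*
 * Descriptive note (not from the original sources): when the firmware has
 * already decrypted a protected frame, the 802.11 header still carries the
 * PROTECTED bit and the per-cipher overhead.  This helper clears the bit and
 * strips that overhead so libipw_rx() sees a plain frame:
 *   SEC_LEVEL_3 (CCMP): remove the 8-byte CCMP header and 8-byte MIC
 *   SEC_LEVEL_1 (WEP):  remove the 4-byte IV and 4-byte ICV
 */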
7754 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7755 struct sk_buff *skb)
7756 {
7757 struct ieee80211_hdr *hdr;
7758 u16 fc;
7759
7760 hdr = (struct ieee80211_hdr *)skb->data;
7761 fc = le16_to_cpu(hdr->frame_control);
7762 if (!(fc & IEEE80211_FCTL_PROTECTED))
7763 return;
7764
7765 fc &= ~IEEE80211_FCTL_PROTECTED;
7766 hdr->frame_control = cpu_to_le16(fc);
7767 switch (priv->ieee->sec.level) {
7768 case SEC_LEVEL_3:
7769 /* Remove CCMP HDR */
7770 memmove(skb->data + LIBIPW_3ADDR_LEN,
7771 skb->data + LIBIPW_3ADDR_LEN + 8,
7772 skb->len - LIBIPW_3ADDR_LEN - 8);
7773 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7774 break;
7775 case SEC_LEVEL_2:
7776 break;
7777 case SEC_LEVEL_1:
7778 /* Remove IV */
7779 memmove(skb->data + LIBIPW_3ADDR_LEN,
7780 skb->data + LIBIPW_3ADDR_LEN + 4,
7781 skb->len - LIBIPW_3ADDR_LEN - 4);
7782 skb_trim(skb, skb->len - 8); /* IV + ICV */
7783 break;
7784 case SEC_LEVEL_0:
7785 break;
7786 default:
7787 printk(KERN_ERR "Unknown security level %d\n",
7788 priv->ieee->sec.level);
7789 break;
7790 }
7791 }
7792
7793 static void ipw_handle_data_packet(struct ipw_priv *priv,
7794 struct ipw_rx_mem_buffer *rxb,
7795 struct libipw_rx_stats *stats)
7796 {
7797 struct net_device *dev = priv->net_dev;
7798 struct libipw_hdr_4addr *hdr;
7799 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7800
7801 /* We received data from the HW, so stop the watchdog */
7802 dev->trans_start = jiffies;
7803
7804 /* We only process data packets if the
7805 * interface is open */
7806 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7807 skb_tailroom(rxb->skb))) {
7808 dev->stats.rx_errors++;
7809 priv->wstats.discard.misc++;
7810 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7811 return;
7812 } else if (unlikely(!netif_running(priv->net_dev))) {
7813 dev->stats.rx_dropped++;
7814 priv->wstats.discard.misc++;
7815 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7816 return;
7817 }
7818
7819 /* Advance skb->data to the start of the actual payload */
7820 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7821
7822 /* Set the size of the skb to the size of the frame */
7823 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7824
7825 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7826
7827 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7828 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7829 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7830 (is_multicast_ether_addr(hdr->addr1) ?
7831 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7832 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7833
7834 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7835 dev->stats.rx_errors++;
7836 else { /* libipw_rx succeeded, so it now owns the SKB */
7837 rxb->skb = NULL;
7838 __ipw_led_activity_on(priv);
7839 }
7840 }
7841
7842 #ifdef CONFIG_IPW2200_RADIOTAP
7843 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7844 struct ipw_rx_mem_buffer *rxb,
7845 struct libipw_rx_stats *stats)
7846 {
7847 struct net_device *dev = priv->net_dev;
7848 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7849 struct ipw_rx_frame *frame = &pkt->u.frame;
7850
7851 /* initial pull of some data */
7852 u16 received_channel = frame->received_channel;
7853 u8 antennaAndPhy = frame->antennaAndPhy;
7854 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7855 u16 pktrate = frame->rate;
7856
7857 /* Magic struct that slots into the radiotap header -- no reason
7858 * to build this manually element by element, we can write it much
7859 * more efficiently than we can parse it. ORDER MATTERS HERE */
7860 struct ipw_rt_hdr *ipw_rt;
7861
7862 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7863
7864 /* We received data from the HW, so stop the watchdog */
7865 dev->trans_start = jiffies;
7866
7867 /* We only process data packets if the
7868 * interface is open */
7869 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7870 skb_tailroom(rxb->skb))) {
7871 dev->stats.rx_errors++;
7872 priv->wstats.discard.misc++;
7873 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7874 return;
7875 } else if (unlikely(!netif_running(priv->net_dev))) {
7876 dev->stats.rx_dropped++;
7877 priv->wstats.discard.misc++;
7878 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7879 return;
7880 }
7881
7882 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7883 * that now */
7884 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7885 /* FIXME: Should alloc bigger skb instead */
7886 dev->stats.rx_dropped++;
7887 priv->wstats.discard.misc++;
7888 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7889 return;
7890 }
7891
7892 /* copy the frame itself */
7893 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7894 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7895
7896 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7897
7898 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7899 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7900 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7901
7902 /* Big bitfield of all the fields we provide in radiotap */
7903 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7904 (1 << IEEE80211_RADIOTAP_TSFT) |
7905 (1 << IEEE80211_RADIOTAP_FLAGS) |
7906 (1 << IEEE80211_RADIOTAP_RATE) |
7907 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7908 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7909 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7910 (1 << IEEE80211_RADIOTAP_ANTENNA));
7911
7912 /* Zero the flags, we'll add to them as we go */
7913 ipw_rt->rt_flags = 0;
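/* Descriptive note: only four TSF bytes are combined here, so the upper
 * 32 bits of rt_tsf remain zero. */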
7914 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7915 frame->parent_tsf[2] << 16 |
7916 frame->parent_tsf[1] << 8 |
7917 frame->parent_tsf[0]);
7918
7919 /* Convert signal to DBM */
7920 ipw_rt->rt_dbmsignal = antsignal;
7921 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7922
7923 /* Convert the channel data and set the flags */
7924 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7925 if (received_channel > 14) { /* 802.11a */
7926 ipw_rt->rt_chbitmask =
7927 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7928 } else if (antennaAndPhy & 32) { /* 802.11b */
7929 ipw_rt->rt_chbitmask =
7930 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7931 } else { /* 802.11g */
7932 ipw_rt->rt_chbitmask =
7933 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7934 }
7935
7936 /* set the rate in multiples of 500k/s */
7937 switch (pktrate) {
7938 case IPW_TX_RATE_1MB:
7939 ipw_rt->rt_rate = 2;
7940 break;
7941 case IPW_TX_RATE_2MB:
7942 ipw_rt->rt_rate = 4;
7943 break;
7944 case IPW_TX_RATE_5MB:
7945 ipw_rt->rt_rate = 10;
7946 break;
7947 case IPW_TX_RATE_6MB:
7948 ipw_rt->rt_rate = 12;
7949 break;
7950 case IPW_TX_RATE_9MB:
7951 ipw_rt->rt_rate = 18;
7952 break;
7953 case IPW_TX_RATE_11MB:
7954 ipw_rt->rt_rate = 22;
7955 break;
7956 case IPW_TX_RATE_12MB:
7957 ipw_rt->rt_rate = 24;
7958 break;
7959 case IPW_TX_RATE_18MB:
7960 ipw_rt->rt_rate = 36;
7961 break;
7962 case IPW_TX_RATE_24MB:
7963 ipw_rt->rt_rate = 48;
7964 break;
7965 case IPW_TX_RATE_36MB:
7966 ipw_rt->rt_rate = 72;
7967 break;
7968 case IPW_TX_RATE_48MB:
7969 ipw_rt->rt_rate = 96;
7970 break;
7971 case IPW_TX_RATE_54MB:
7972 ipw_rt->rt_rate = 108;
7973 break;
7974 default:
7975 ipw_rt->rt_rate = 0;
7976 break;
7977 }
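/* Note: the radiotap RATE field is expressed in 500 kb/s units, so the
 * values above are simply the bit rate in Mb/s times two
 * (e.g. 5.5 Mb/s -> 10, 54 Mb/s -> 108). */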
7978
7979 /* antenna number */
7980 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7981
7982 /* set the preamble flag if we have it */
7983 if ((antennaAndPhy & 64))
7984 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7985
7986 /* Set the size of the skb to the size of the frame */
7987 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7988
7989 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7990
7991 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7992 dev->stats.rx_errors++;
7993 else { /* libipw_rx succeeded, so it now owns the SKB */
7994 rxb->skb = NULL;
7995 /* no LED during capture */
7996 }
7997 }
7998 #endif
7999
8000 #ifdef CONFIG_IPW2200_PROMISCUOUS
8001 #define libipw_is_probe_response(fc) \
8002 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
8003 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
8004
8005 #define libipw_is_management(fc) \
8006 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
8007
8008 #define libipw_is_control(fc) \
8009 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
8010
8011 #define libipw_is_data(fc) \
8012 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8013
8014 #define libipw_is_assoc_request(fc) \
8015 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8016
8017 #define libipw_is_reassoc_request(fc) \
8018 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8019
8020 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8021 struct ipw_rx_mem_buffer *rxb,
8022 struct libipw_rx_stats *stats)
8023 {
8024 struct net_device *dev = priv->prom_net_dev;
8025 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8026 struct ipw_rx_frame *frame = &pkt->u.frame;
8027 struct ipw_rt_hdr *ipw_rt;
8028
8029 /* First cache any information we need before we overwrite
8030 * the information provided in the skb from the hardware */
8031 struct ieee80211_hdr *hdr;
8032 u16 channel = frame->received_channel;
8033 u8 phy_flags = frame->antennaAndPhy;
8034 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8035 s8 noise = (s8) le16_to_cpu(frame->noise);
8036 u8 rate = frame->rate;
8037 unsigned short len = le16_to_cpu(pkt->u.frame.length);
8038 struct sk_buff *skb;
8039 int hdr_only = 0;
8040 u16 filter = priv->prom_priv->filter;
8041
8042 /* If the filter is set to not include Rx frames then return */
8043 if (filter & IPW_PROM_NO_RX)
8044 return;
8045
8046 /* We received data from the HW, so stop the watchdog */
8047 dev->trans_start = jiffies;
8048
8049 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8050 dev->stats.rx_errors++;
8051 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8052 return;
8053 }
8054
8055 /* We only process data packets if the interface is open */
8056 if (unlikely(!netif_running(dev))) {
8057 dev->stats.rx_dropped++;
8058 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8059 return;
8060 }
8061
8062 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8063 * that now */
8064 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8065 /* FIXME: Should alloc bigger skb instead */
8066 dev->stats.rx_dropped++;
8067 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8068 return;
8069 }
8070
8071 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8072 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8073 if (filter & IPW_PROM_NO_MGMT)
8074 return;
8075 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8076 hdr_only = 1;
8077 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8078 if (filter & IPW_PROM_NO_CTL)
8079 return;
8080 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8081 hdr_only = 1;
8082 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8083 if (filter & IPW_PROM_NO_DATA)
8084 return;
8085 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8086 hdr_only = 1;
8087 }
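/* At this point the frame has passed the type filter; if the matching
 * *_HEADER_ONLY bit was set, only the 802.11 header (preceded by the
 * radiotap header built below) is handed to the rtap interface. */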
8088
8089 /* Copy the SKB since this is for the promiscuous side */
8090 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8091 if (skb == NULL) {
8092 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8093 return;
8094 }
8095
8096 /* the radiotap header is built at the front of the copy; the frame data is copied in right after it below */
8097 ipw_rt = (void *)skb->data;
8098
8099 if (hdr_only)
8100 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8101
8102 memcpy(ipw_rt->payload, hdr, len);
8103
8104 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8105 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8106 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8107
8108 /* Set the size of the skb to the size of the frame */
8109 skb_put(skb, sizeof(*ipw_rt) + len);
8110
8111 /* Big bitfield of all the fields we provide in radiotap */
8112 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8113 (1 << IEEE80211_RADIOTAP_TSFT) |
8114 (1 << IEEE80211_RADIOTAP_FLAGS) |
8115 (1 << IEEE80211_RADIOTAP_RATE) |
8116 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8117 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8118 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8119 (1 << IEEE80211_RADIOTAP_ANTENNA));
8120
8121 /* Zero the flags, we'll add to them as we go */
8122 ipw_rt->rt_flags = 0;
8123 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8124 frame->parent_tsf[2] << 16 |
8125 frame->parent_tsf[1] << 8 |
8126 frame->parent_tsf[0]);
8127
8128 /* Convert to DBM */
8129 ipw_rt->rt_dbmsignal = signal;
8130 ipw_rt->rt_dbmnoise = noise;
8131
8132 /* Convert the channel data and set the flags */
8133 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8134 if (channel > 14) { /* 802.11a */
8135 ipw_rt->rt_chbitmask =
8136 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8137 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8138 ipw_rt->rt_chbitmask =
8139 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8140 } else { /* 802.11g */
8141 ipw_rt->rt_chbitmask =
8142 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8143 }
8144
8145 /* set the rate in multiples of 500k/s */
8146 switch (rate) {
8147 case IPW_TX_RATE_1MB:
8148 ipw_rt->rt_rate = 2;
8149 break;
8150 case IPW_TX_RATE_2MB:
8151 ipw_rt->rt_rate = 4;
8152 break;
8153 case IPW_TX_RATE_5MB:
8154 ipw_rt->rt_rate = 10;
8155 break;
8156 case IPW_TX_RATE_6MB:
8157 ipw_rt->rt_rate = 12;
8158 break;
8159 case IPW_TX_RATE_9MB:
8160 ipw_rt->rt_rate = 18;
8161 break;
8162 case IPW_TX_RATE_11MB:
8163 ipw_rt->rt_rate = 22;
8164 break;
8165 case IPW_TX_RATE_12MB:
8166 ipw_rt->rt_rate = 24;
8167 break;
8168 case IPW_TX_RATE_18MB:
8169 ipw_rt->rt_rate = 36;
8170 break;
8171 case IPW_TX_RATE_24MB:
8172 ipw_rt->rt_rate = 48;
8173 break;
8174 case IPW_TX_RATE_36MB:
8175 ipw_rt->rt_rate = 72;
8176 break;
8177 case IPW_TX_RATE_48MB:
8178 ipw_rt->rt_rate = 96;
8179 break;
8180 case IPW_TX_RATE_54MB:
8181 ipw_rt->rt_rate = 108;
8182 break;
8183 default:
8184 ipw_rt->rt_rate = 0;
8185 break;
8186 }
8187
8188 /* antenna number */
8189 ipw_rt->rt_antenna = (phy_flags & 3);
8190
8191 /* set the preamble flag if we have it */
8192 if (phy_flags & (1 << 6))
8193 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8194
8195 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8196
8197 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8198 dev->stats.rx_errors++;
8199 dev_kfree_skb_any(skb);
8200 }
8201 }
8202 #endif
8203
8204 static int is_network_packet(struct ipw_priv *priv,
8205 struct libipw_hdr_4addr *header)
8206 {
8207 /* Filter incoming packets to determine if they are targeted toward
8208 * this network, discarding packets coming from ourselves */
8209 switch (priv->ieee->iw_mode) {
8210 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8211 /* packets from our adapter are dropped (echo) */
8212 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8213 return 0;
8214
8215 /* {broad,multi}cast packets to our BSSID go through */
8216 if (is_multicast_ether_addr(header->addr1))
8217 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8218
8219 /* packets to our adapter go through */
8220 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8221 ETH_ALEN);
8222
8223 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8224 /* packets from our adapter are dropped (echo) */
8225 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8226 return 0;
8227
8228 /* {broad,multi}cast packets to our BSS go through */
8229 if (is_multicast_ether_addr(header->addr1))
8230 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8231
8232 /* packets to our adapter go through */
8233 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8234 ETH_ALEN);
8235 }
8236
8237 return 1;
8238 }
8239
8240 #define IPW_PACKET_RETRY_TIME HZ
8241
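/*
 * Descriptive note: duplicate detection tracks the last sequence and fragment
 * number seen from each transmitter -- a small per-MAC hash in IBSS mode, a
 * single slot for the AP in infrastructure mode -- and drops a frame that
 * repeats the same sequence number within IPW_PACKET_RETRY_TIME (HZ, i.e.
 * one second) with a duplicate or out-of-order fragment number.
 */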
8242 static int is_duplicate_packet(struct ipw_priv *priv,
8243 struct libipw_hdr_4addr *header)
8244 {
8245 u16 sc = le16_to_cpu(header->seq_ctl);
8246 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8247 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8248 u16 *last_seq, *last_frag;
8249 unsigned long *last_time;
8250
8251 switch (priv->ieee->iw_mode) {
8252 case IW_MODE_ADHOC:
8253 {
8254 struct list_head *p;
8255 struct ipw_ibss_seq *entry = NULL;
8256 u8 *mac = header->addr2;
8257 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8258
8259 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8260 entry =
8261 list_entry(p, struct ipw_ibss_seq, list);
8262 if (!memcmp(entry->mac, mac, ETH_ALEN))
8263 break;
8264 }
8265 if (p == &priv->ibss_mac_hash[index]) {
8266 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8267 if (!entry) {
8268 IPW_ERROR
8269 ("Cannot malloc new mac entry\n");
8270 return 0;
8271 }
8272 memcpy(entry->mac, mac, ETH_ALEN);
8273 entry->seq_num = seq;
8274 entry->frag_num = frag;
8275 entry->packet_time = jiffies;
8276 list_add(&entry->list,
8277 &priv->ibss_mac_hash[index]);
8278 return 0;
8279 }
8280 last_seq = &entry->seq_num;
8281 last_frag = &entry->frag_num;
8282 last_time = &entry->packet_time;
8283 break;
8284 }
8285 case IW_MODE_INFRA:
8286 last_seq = &priv->last_seq_num;
8287 last_frag = &priv->last_frag_num;
8288 last_time = &priv->last_packet_time;
8289 break;
8290 default:
8291 return 0;
8292 }
8293 if ((*last_seq == seq) &&
8294 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8295 if (*last_frag == frag)
8296 goto drop;
8297 if (*last_frag + 1 != frag)
8298 /* out-of-order fragment */
8299 goto drop;
8300 } else
8301 *last_seq = seq;
8302
8303 *last_frag = frag;
8304 *last_time = jiffies;
8305 return 0;
8306
8307 drop:
8308 /* This check is commented out because the card has been observed
8309 * receiving duplicate packets without the FCTL_RETRY bit set in
8310 * IBSS mode with fragmentation enabled.
8311 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8312 return 1;
8313 }
8314
8315 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8316 struct ipw_rx_mem_buffer *rxb,
8317 struct libipw_rx_stats *stats)
8318 {
8319 struct sk_buff *skb = rxb->skb;
8320 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8321 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8322 (skb->data + IPW_RX_FRAME_SIZE);
8323
8324 libipw_rx_mgt(priv->ieee, header, stats);
8325
8326 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8327 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8328 IEEE80211_STYPE_PROBE_RESP) ||
8329 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8330 IEEE80211_STYPE_BEACON))) {
8331 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8332 ipw_add_station(priv, header->addr2);
8333 }
8334
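/* With CFG_NET_STATS set, the management frame is also delivered to user
 * space as an ETH_P_80211_STATS packet laid out as
 * [struct libipw_rx_stats][802.11 frame], built in place below. */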
8335 if (priv->config & CFG_NET_STATS) {
8336 IPW_DEBUG_HC("sending stat packet\n");
8337
8338 /* Set the size of the skb to the size of the full
8339 * ipw header and 802.11 frame */
8340 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8341 IPW_RX_FRAME_SIZE);
8342
8343 /* Advance past the ipw packet header to the 802.11 frame */
8344 skb_pull(skb, IPW_RX_FRAME_SIZE);
8345
8346 /* Push the libipw_rx_stats before the 802.11 frame */
8347 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8348
8349 skb->dev = priv->ieee->dev;
8350
8351 /* Point the mac header at the libipw_rx_stats */
8352 skb_reset_mac_header(skb);
8353
8354 skb->pkt_type = PACKET_OTHERHOST;
8355 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8356 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8357 netif_rx(skb);
8358 rxb->skb = NULL;
8359 }
8360 }
8361
8362 /*
8363 * Main entry function for receiving a packet with 802.11 headers. This
8364 * should be called whenever the FW has notified us that there is a new
8365 * skb in the receive queue.
8366 */
8367 static void ipw_rx(struct ipw_priv *priv)
8368 {
8369 struct ipw_rx_mem_buffer *rxb;
8370 struct ipw_rx_packet *pkt;
8371 struct libipw_hdr_4addr *header;
8372 u32 r, w, i;
8373 u8 network_packet;
8374 u8 fill_rx = 0;
8375
8376 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8377 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8378 i = priv->rxq->read;
8379
8380 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8381 fill_rx = 1;
8382
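/* Descriptive summary: walk every filled Rx buffer between our software
 * read index and the index reported by the hardware; each buffer is synced
 * for the CPU, dispatched by message type, then unmapped and returned to
 * the rx_used list so the queue can be restocked. */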
8383 while (i != r) {
8384 rxb = priv->rxq->queue[i];
8385 if (unlikely(rxb == NULL)) {
8386 printk(KERN_CRIT "Queue not allocated!\n");
8387 break;
8388 }
8389 priv->rxq->queue[i] = NULL;
8390
8391 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8392 IPW_RX_BUF_SIZE,
8393 PCI_DMA_FROMDEVICE);
8394
8395 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8396 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8397 pkt->header.message_type,
8398 pkt->header.rx_seq_num, pkt->header.control_bits);
8399
8400 switch (pkt->header.message_type) {
8401 case RX_FRAME_TYPE: /* 802.11 frame */ {
8402 struct libipw_rx_stats stats = {
8403 .rssi = pkt->u.frame.rssi_dbm -
8404 IPW_RSSI_TO_DBM,
8405 .signal =
8406 pkt->u.frame.rssi_dbm -
8407 IPW_RSSI_TO_DBM + 0x100,
8408 .noise =
8409 le16_to_cpu(pkt->u.frame.noise),
8410 .rate = pkt->u.frame.rate,
8411 .mac_time = jiffies,
8412 .received_channel =
8413 pkt->u.frame.received_channel,
8414 .freq =
8415 (pkt->u.frame.control
8416 & (1 << 0)) ?
8417 LIBIPW_24GHZ_BAND :
8418 LIBIPW_52GHZ_BAND,
8419 .len = le16_to_cpu(pkt->u.frame.length),
8420 };
8421
8422 if (stats.rssi != 0)
8423 stats.mask |= LIBIPW_STATMASK_RSSI;
8424 if (stats.signal != 0)
8425 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8426 if (stats.noise != 0)
8427 stats.mask |= LIBIPW_STATMASK_NOISE;
8428 if (stats.rate != 0)
8429 stats.mask |= LIBIPW_STATMASK_RATE;
8430
8431 priv->rx_packets++;
8432
8433 #ifdef CONFIG_IPW2200_PROMISCUOUS
8434 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8435 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8436 #endif
8437
8438 #ifdef CONFIG_IPW2200_MONITOR
8439 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8440 #ifdef CONFIG_IPW2200_RADIOTAP
8441
8442 ipw_handle_data_packet_monitor(priv,
8443 rxb,
8444 &stats);
8445 #else
8446 ipw_handle_data_packet(priv, rxb,
8447 &stats);
8448 #endif
8449 break;
8450 }
8451 #endif
8452
8453 header =
8454 (struct libipw_hdr_4addr *)
8455 (rxb->skb->data +
8456 IPW_RX_FRAME_SIZE);
8457 /* TODO: Check Ad-Hoc dest/source and make sure
8458 * that we are actually parsing these packets
8459 * correctly -- we should probably use the
8460 * frame control of the packet and disregard
8461 * the current iw_mode */
8462
8463 network_packet =
8464 is_network_packet(priv, header);
8465 if (network_packet && priv->assoc_network) {
8466 priv->assoc_network->stats.rssi =
8467 stats.rssi;
8468 priv->exp_avg_rssi =
8469 exponential_average(priv->exp_avg_rssi,
8470 stats.rssi, DEPTH_RSSI);
8471 }
8472
8473 IPW_DEBUG_RX("Frame: len=%u\n",
8474 le16_to_cpu(pkt->u.frame.length));
8475
8476 if (le16_to_cpu(pkt->u.frame.length) <
8477 libipw_get_hdrlen(le16_to_cpu(
8478 header->frame_ctl))) {
8479 IPW_DEBUG_DROP
8480 ("Received packet is too small. "
8481 "Dropping.\n");
8482 priv->net_dev->stats.rx_errors++;
8483 priv->wstats.discard.misc++;
8484 break;
8485 }
8486
8487 switch (WLAN_FC_GET_TYPE
8488 (le16_to_cpu(header->frame_ctl))) {
8489
8490 case IEEE80211_FTYPE_MGMT:
8491 ipw_handle_mgmt_packet(priv, rxb,
8492 &stats);
8493 break;
8494
8495 case IEEE80211_FTYPE_CTL:
8496 break;
8497
8498 case IEEE80211_FTYPE_DATA:
8499 if (unlikely(!network_packet ||
8500 is_duplicate_packet(priv,
8501 header)))
8502 {
8503 IPW_DEBUG_DROP("Dropping: "
8504 "%pM, "
8505 "%pM, "
8506 "%pM\n",
8507 header->addr1,
8508 header->addr2,
8509 header->addr3);
8510 break;
8511 }
8512
8513 ipw_handle_data_packet(priv, rxb,
8514 &stats);
8515
8516 break;
8517 }
8518 break;
8519 }
8520
8521 case RX_HOST_NOTIFICATION_TYPE:{
8522 IPW_DEBUG_RX
8523 ("Notification: subtype=%02X flags=%02X size=%d\n",
8524 pkt->u.notification.subtype,
8525 pkt->u.notification.flags,
8526 le16_to_cpu(pkt->u.notification.size));
8527 ipw_rx_notification(priv, &pkt->u.notification);
8528 break;
8529 }
8530
8531 default:
8532 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8533 pkt->header.message_type);
8534 break;
8535 }
8536
8537 /* For now we just don't re-use anything. We can tweak this
8538 * later to try to re-use notification packets and SKBs that
8539 * fail to Rx correctly */
8540 if (rxb->skb != NULL) {
8541 dev_kfree_skb_any(rxb->skb);
8542 rxb->skb = NULL;
8543 }
8544
8545 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8546 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8547 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8548
8549 i = (i + 1) % RX_QUEUE_SIZE;
8550
8551 /* If there are a lot of unused frames, restock the Rx queue
8552 * so the ucode won't assert */
8553 if (fill_rx) {
8554 priv->rxq->read = i;
8555 ipw_rx_queue_replenish(priv);
8556 }
8557 }
8558
8559 /* Backtrack one entry */
8560 priv->rxq->read = i;
8561 ipw_rx_queue_restock(priv);
8562 }
8563
8564 #define DEFAULT_RTS_THRESHOLD 2304U
8565 #define MIN_RTS_THRESHOLD 1U
8566 #define MAX_RTS_THRESHOLD 2304U
8567 #define DEFAULT_BEACON_INTERVAL 100U
8568 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8569 #define DEFAULT_LONG_RETRY_LIMIT 4U
8570
8571 /**
8572 * ipw_sw_reset
8573 * @option: options to control different reset behaviour
8574 * 0 = reset everything except the 'disable' module_param
8575 * 1 = reset everything and print out driver info (for probe only)
8576 * 2 = reset everything
8577 */
8578 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8579 {
8580 int band, modulation;
8581 int old_mode = priv->ieee->iw_mode;
8582
8583 /* Initialize module parameter values here */
8584 priv->config = 0;
8585
8586 /* We default to disabling the LED code as right now it causes
8587 * too many systems to lock up... */
8588 if (!led_support)
8589 priv->config |= CFG_NO_LED;
8590
8591 if (associate)
8592 priv->config |= CFG_ASSOCIATE;
8593 else
8594 IPW_DEBUG_INFO("Auto associate disabled.\n");
8595
8596 if (auto_create)
8597 priv->config |= CFG_ADHOC_CREATE;
8598 else
8599 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8600
8601 priv->config &= ~CFG_STATIC_ESSID;
8602 priv->essid_len = 0;
8603 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8604
8605 if (disable && option) {
8606 priv->status |= STATUS_RF_KILL_SW;
8607 IPW_DEBUG_INFO("Radio disabled.\n");
8608 }
8609
8610 if (default_channel != 0) {
8611 priv->config |= CFG_STATIC_CHANNEL;
8612 priv->channel = default_channel;
8613 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8614 /* TODO: Validate that provided channel is in range */
8615 }
8616 #ifdef CONFIG_IPW2200_QOS
8617 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8618 burst_duration_CCK, burst_duration_OFDM);
8619 #endif /* CONFIG_IPW2200_QOS */
8620
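/* network_mode module parameter: 0 = BSS/infrastructure (default),
 * 1 = IBSS/ad-hoc, 2 = monitor (when CONFIG_IPW2200_MONITOR is set). */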
8621 switch (network_mode) {
8622 case 1:
8623 priv->ieee->iw_mode = IW_MODE_ADHOC;
8624 priv->net_dev->type = ARPHRD_ETHER;
8625
8626 break;
8627 #ifdef CONFIG_IPW2200_MONITOR
8628 case 2:
8629 priv->ieee->iw_mode = IW_MODE_MONITOR;
8630 #ifdef CONFIG_IPW2200_RADIOTAP
8631 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8632 #else
8633 priv->net_dev->type = ARPHRD_IEEE80211;
8634 #endif
8635 break;
8636 #endif
8637 default:
8638 case 0:
8639 priv->net_dev->type = ARPHRD_ETHER;
8640 priv->ieee->iw_mode = IW_MODE_INFRA;
8641 break;
8642 }
8643
8644 if (hwcrypto) {
8645 priv->ieee->host_encrypt = 0;
8646 priv->ieee->host_encrypt_msdu = 0;
8647 priv->ieee->host_decrypt = 0;
8648 priv->ieee->host_mc_decrypt = 0;
8649 }
8650 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8651
8652 /* IPW2200/2915 is able to do hardware fragmentation. */
8653 priv->ieee->host_open_frag = 0;
8654
8655 if ((priv->pci_dev->device == 0x4223) ||
8656 (priv->pci_dev->device == 0x4224)) {
8657 if (option == 1)
8658 printk(KERN_INFO DRV_NAME
8659 ": Detected Intel PRO/Wireless 2915ABG Network "
8660 "Connection\n");
8661 priv->ieee->abg_true = 1;
8662 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8663 modulation = LIBIPW_OFDM_MODULATION |
8664 LIBIPW_CCK_MODULATION;
8665 priv->adapter = IPW_2915ABG;
8666 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8667 } else {
8668 if (option == 1)
8669 printk(KERN_INFO DRV_NAME
8670 ": Detected Intel PRO/Wireless 2200BG Network "
8671 "Connection\n");
8672
8673 priv->ieee->abg_true = 0;
8674 band = LIBIPW_24GHZ_BAND;
8675 modulation = LIBIPW_OFDM_MODULATION |
8676 LIBIPW_CCK_MODULATION;
8677 priv->adapter = IPW_2200BG;
8678 priv->ieee->mode = IEEE_G | IEEE_B;
8679 }
8680
8681 priv->ieee->freq_band = band;
8682 priv->ieee->modulation = modulation;
8683
8684 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8685
8686 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8687 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8688
8689 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8690 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8691 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8692
8693 /* If power management is turned on, default to AC mode */
8694 priv->power_mode = IPW_POWER_AC;
8695 priv->tx_power = IPW_TX_POWER_DEFAULT;
8696
8697 return old_mode == priv->ieee->iw_mode;
8698 }
8699
8700 /*
8701 * This file defines the Wireless Extension handlers. It does not
8702 * define any methods of hardware manipulation and relies on the
8703 * functions defined in ipw_main to provide the HW interaction.
8704 *
8705 * The exception to this is the use of the ipw_get_ordinal()
8706 * function used to poll the hardware vs. making unnecessary calls.
8707 *
8708 */
8709
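/*
 * Descriptive note: a channel of 0 clears CFG_STATIC_CHANNEL and lets the
 * driver associate on any channel.  In monitor mode a channel change only
 * needs to abort a scan in progress; otherwise it forces a [re]association
 * with the new parameters.
 */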
8710 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8711 {
8712 if (channel == 0) {
8713 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8714 priv->config &= ~CFG_STATIC_CHANNEL;
8715 IPW_DEBUG_ASSOC("Attempting to associate with new "
8716 "parameters.\n");
8717 ipw_associate(priv);
8718 return 0;
8719 }
8720
8721 priv->config |= CFG_STATIC_CHANNEL;
8722
8723 if (priv->channel == channel) {
8724 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8725 channel);
8726 return 0;
8727 }
8728
8729 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8730 priv->channel = channel;
8731
8732 #ifdef CONFIG_IPW2200_MONITOR
8733 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8734 int i;
8735 if (priv->status & STATUS_SCANNING) {
8736 IPW_DEBUG_SCAN("Scan abort triggered due to "
8737 "channel change.\n");
8738 ipw_abort_scan(priv);
8739 }
8740
8741 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8742 udelay(10);
8743
8744 if (priv->status & STATUS_SCANNING)
8745 IPW_DEBUG_SCAN("Still scanning...\n");
8746 else
8747 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8748 1000 - i);
8749
8750 return 0;
8751 }
8752 #endif /* CONFIG_IPW2200_MONITOR */
8753
8754 /* Network configuration changed -- force [re]association */
8755 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8756 if (!ipw_disassociate(priv))
8757 ipw_associate(priv);
8758
8759 return 0;
8760 }
8761
8762 static int ipw_wx_set_freq(struct net_device *dev,
8763 struct iw_request_info *info,
8764 union iwreq_data *wrqu, char *extra)
8765 {
8766 struct ipw_priv *priv = libipw_priv(dev);
8767 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8768 struct iw_freq *fwrq = &wrqu->freq;
8769 int ret = 0, i;
8770 u8 channel, flags;
8771 int band;
8772
8773 if (fwrq->m == 0) {
8774 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8775 mutex_lock(&priv->mutex);
8776 ret = ipw_set_channel(priv, 0);
8777 mutex_unlock(&priv->mutex);
8778 return ret;
8779 }
8780 /* if setting by freq convert to channel */
8781 if (fwrq->e == 1) {
8782 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8783 if (channel == 0)
8784 return -EINVAL;
8785 } else
8786 channel = fwrq->m;
8787
8788 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8789 return -EINVAL;
8790
8791 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8792 i = libipw_channel_to_index(priv->ieee, channel);
8793 if (i == -1)
8794 return -EINVAL;
8795
8796 flags = (band == LIBIPW_24GHZ_BAND) ?
8797 geo->bg[i].flags : geo->a[i].flags;
8798 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8799 IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive scan only)\n");
8800 return -EINVAL;
8801 }
8802 }
8803
8804 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8805 mutex_lock(&priv->mutex);
8806 ret = ipw_set_channel(priv, channel);
8807 mutex_unlock(&priv->mutex);
8808 return ret;
8809 }
8810
8811 static int ipw_wx_get_freq(struct net_device *dev,
8812 struct iw_request_info *info,
8813 union iwreq_data *wrqu, char *extra)
8814 {
8815 struct ipw_priv *priv = libipw_priv(dev);
8816
8817 wrqu->freq.e = 0;
8818
8819 /* If we are associated, trying to associate, or have a statically
8820 * configured CHANNEL then return that; otherwise return ANY */
8821 mutex_lock(&priv->mutex);
8822 if (priv->config & CFG_STATIC_CHANNEL ||
8823 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8824 int i;
8825
8826 i = libipw_channel_to_index(priv->ieee, priv->channel);
8827 BUG_ON(i == -1);
8828 wrqu->freq.e = 1;
8829
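/* geo frequencies are stored in MHz; with e = 1 the reported value
 * m * 10^e works out to the frequency in Hz
 * (e.g. 2412 MHz -> m = 241200000, e = 1). */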
8830 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8831 case LIBIPW_52GHZ_BAND:
8832 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8833 break;
8834
8835 case LIBIPW_24GHZ_BAND:
8836 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8837 break;
8838
8839 default:
8840 BUG();
8841 }
8842 } else
8843 wrqu->freq.m = 0;
8844
8845 mutex_unlock(&priv->mutex);
8846 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8847 return 0;
8848 }
8849
8850 static int ipw_wx_set_mode(struct net_device *dev,
8851 struct iw_request_info *info,
8852 union iwreq_data *wrqu, char *extra)
8853 {
8854 struct ipw_priv *priv = libipw_priv(dev);
8855 int err = 0;
8856
8857 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8858
8859 switch (wrqu->mode) {
8860 #ifdef CONFIG_IPW2200_MONITOR
8861 case IW_MODE_MONITOR:
8862 #endif
8863 case IW_MODE_ADHOC:
8864 case IW_MODE_INFRA:
8865 break;
8866 case IW_MODE_AUTO:
8867 wrqu->mode = IW_MODE_INFRA;
8868 break;
8869 default:
8870 return -EINVAL;
8871 }
8872 if (wrqu->mode == priv->ieee->iw_mode)
8873 return 0;
8874
8875 mutex_lock(&priv->mutex);
8876
8877 ipw_sw_reset(priv, 0);
8878
8879 #ifdef CONFIG_IPW2200_MONITOR
8880 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8881 priv->net_dev->type = ARPHRD_ETHER;
8882
8883 if (wrqu->mode == IW_MODE_MONITOR)
8884 #ifdef CONFIG_IPW2200_RADIOTAP
8885 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8886 #else
8887 priv->net_dev->type = ARPHRD_IEEE80211;
8888 #endif
8889 #endif /* CONFIG_IPW2200_MONITOR */
8890
8891 /* Free the existing firmware and reset the fw_loaded
8892 * flag so ipw_load() will bring in the new firmware */
8893 free_firmware();
8894
8895 priv->ieee->iw_mode = wrqu->mode;
8896
8897 schedule_work(&priv->adapter_restart);
8898 mutex_unlock(&priv->mutex);
8899 return err;
8900 }
8901
8902 static int ipw_wx_get_mode(struct net_device *dev,
8903 struct iw_request_info *info,
8904 union iwreq_data *wrqu, char *extra)
8905 {
8906 struct ipw_priv *priv = libipw_priv(dev);
8907 mutex_lock(&priv->mutex);
8908 wrqu->mode = priv->ieee->iw_mode;
8909 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8910 mutex_unlock(&priv->mutex);
8911 return 0;
8912 }
8913
8914 /* Values are in microseconds */
8915 static const s32 timeout_duration[] = {
8916 350000,
8917 250000,
8918 75000,
8919 37000,
8920 25000,
8921 };
8922
8923 static const s32 period_duration[] = {
8924 400000,
8925 700000,
8926 1000000,
8927 1000000,
8928 1000000
8929 };
8930
8931 static int ipw_wx_get_range(struct net_device *dev,
8932 struct iw_request_info *info,
8933 union iwreq_data *wrqu, char *extra)
8934 {
8935 struct ipw_priv *priv = libipw_priv(dev);
8936 struct iw_range *range = (struct iw_range *)extra;
8937 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8938 int i = 0, j;
8939
8940 wrqu->data.length = sizeof(*range);
8941 memset(range, 0, sizeof(*range));
8942
8943 /* 54 Mb/s == ~27 Mb/s of real throughput (802.11g) */
8944 range->throughput = 27 * 1000 * 1000;
8945
8946 range->max_qual.qual = 100;
8947 /* TODO: Find real max RSSI and stick here */
8948 range->max_qual.level = 0;
8949 range->max_qual.noise = 0;
8950 range->max_qual.updated = 7; /* Updated all three */
8951
8952 range->avg_qual.qual = 70;
8953 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8954 range->avg_qual.level = 0; /* FIXME to real average level */
8955 range->avg_qual.noise = 0;
8956 range->avg_qual.updated = 7; /* Updated all three */
8957 mutex_lock(&priv->mutex);
8958 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8959
8960 for (i = 0; i < range->num_bitrates; i++)
8961 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8962 500000;
8963
8964 range->max_rts = DEFAULT_RTS_THRESHOLD;
8965 range->min_frag = MIN_FRAG_THRESHOLD;
8966 range->max_frag = MAX_FRAG_THRESHOLD;
8967
8968 range->encoding_size[0] = 5;
8969 range->encoding_size[1] = 13;
8970 range->num_encoding_sizes = 2;
8971 range->max_encoding_tokens = WEP_KEYS;
8972
8973 /* Set the Wireless Extension versions */
8974 range->we_version_compiled = WIRELESS_EXT;
8975 range->we_version_source = 18;
8976
8977 i = 0;
8978 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8979 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8980 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8981 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8982 continue;
8983
8984 range->freq[i].i = geo->bg[j].channel;
8985 range->freq[i].m = geo->bg[j].freq * 100000;
8986 range->freq[i].e = 1;
8987 i++;
8988 }
8989 }
8990
8991 if (priv->ieee->mode & IEEE_A) {
8992 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8993 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8994 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8995 continue;
8996
8997 range->freq[i].i = geo->a[j].channel;
8998 range->freq[i].m = geo->a[j].freq * 100000;
8999 range->freq[i].e = 1;
9000 i++;
9001 }
9002 }
9003
9004 range->num_channels = i;
9005 range->num_frequency = i;
9006
9007 mutex_unlock(&priv->mutex);
9008
9009 /* Event capability (kernel + driver) */
9010 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9011 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9012 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9013 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9014 range->event_capa[1] = IW_EVENT_CAPA_K_1;
9015
9016 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9017 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9018
9019 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9020
9021 IPW_DEBUG_WX("GET Range\n");
9022 return 0;
9023 }
9024
9025 static int ipw_wx_set_wap(struct net_device *dev,
9026 struct iw_request_info *info,
9027 union iwreq_data *wrqu, char *extra)
9028 {
9029 struct ipw_priv *priv = libipw_priv(dev);
9030
9031 static const unsigned char any[] = {
9032 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9033 };
9034 static const unsigned char off[] = {
9035 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9036 };
9037
9038 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9039 return -EINVAL;
9040 mutex_lock(&priv->mutex);
9041 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9042 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9043 /* we disable mandatory BSSID association */
9044 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9045 priv->config &= ~CFG_STATIC_BSSID;
9046 IPW_DEBUG_ASSOC("Attempting to associate with new "
9047 "parameters.\n");
9048 ipw_associate(priv);
9049 mutex_unlock(&priv->mutex);
9050 return 0;
9051 }
9052
9053 priv->config |= CFG_STATIC_BSSID;
9054 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9055 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9056 mutex_unlock(&priv->mutex);
9057 return 0;
9058 }
9059
9060 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9061 wrqu->ap_addr.sa_data);
9062
9063 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9064
9065 /* Network configuration changed -- force [re]association */
9066 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9067 if (!ipw_disassociate(priv))
9068 ipw_associate(priv);
9069
9070 mutex_unlock(&priv->mutex);
9071 return 0;
9072 }
9073
9074 static int ipw_wx_get_wap(struct net_device *dev,
9075 struct iw_request_info *info,
9076 union iwreq_data *wrqu, char *extra)
9077 {
9078 struct ipw_priv *priv = libipw_priv(dev);
9079
9080 /* If we are associated, trying to associate, or have a statically
9081 * configured BSSID then return that; otherwise return ANY */
9082 mutex_lock(&priv->mutex);
9083 if (priv->config & CFG_STATIC_BSSID ||
9084 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9085 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9086 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9087 } else
9088 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9089
9090 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9091 wrqu->ap_addr.sa_data);
9092 mutex_unlock(&priv->mutex);
9093 return 0;
9094 }
9095
9096 static int ipw_wx_set_essid(struct net_device *dev,
9097 struct iw_request_info *info,
9098 union iwreq_data *wrqu, char *extra)
9099 {
9100 struct ipw_priv *priv = libipw_priv(dev);
9101 int length;
9102 DECLARE_SSID_BUF(ssid);
9103
9104 mutex_lock(&priv->mutex);
9105
9106 if (!wrqu->essid.flags)
9107 {
9108 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9109 ipw_disassociate(priv);
9110 priv->config &= ~CFG_STATIC_ESSID;
9111 ipw_associate(priv);
9112 mutex_unlock(&priv->mutex);
9113 return 0;
9114 }
9115
9116 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9117
9118 priv->config |= CFG_STATIC_ESSID;
9119
9120 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9121 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9122 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9123 mutex_unlock(&priv->mutex);
9124 return 0;
9125 }
9126
9127 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9128 print_ssid(ssid, extra, length), length);
9129
9130 priv->essid_len = length;
9131 memcpy(priv->essid, extra, priv->essid_len);
9132
9133 /* Network configuration changed -- force [re]association */
9134 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9135 if (!ipw_disassociate(priv))
9136 ipw_associate(priv);
9137
9138 mutex_unlock(&priv->mutex);
9139 return 0;
9140 }
9141
9142 static int ipw_wx_get_essid(struct net_device *dev,
9143 struct iw_request_info *info,
9144 union iwreq_data *wrqu, char *extra)
9145 {
9146 struct ipw_priv *priv = libipw_priv(dev);
9147 DECLARE_SSID_BUF(ssid);
9148
9149 /* If we are associated, trying to associate, or have a statically
9150 * configured ESSID then return that; otherwise return ANY */
9151 mutex_lock(&priv->mutex);
9152 if (priv->config & CFG_STATIC_ESSID ||
9153 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9154 IPW_DEBUG_WX("Getting essid: '%s'\n",
9155 print_ssid(ssid, priv->essid, priv->essid_len));
9156 memcpy(extra, priv->essid, priv->essid_len);
9157 wrqu->essid.length = priv->essid_len;
9158 wrqu->essid.flags = 1; /* active */
9159 } else {
9160 IPW_DEBUG_WX("Getting essid: ANY\n");
9161 wrqu->essid.length = 0;
9162 wrqu->essid.flags = 0; /* active */
9163 }
9164 mutex_unlock(&priv->mutex);
9165 return 0;
9166 }
9167
9168 static int ipw_wx_set_nick(struct net_device *dev,
9169 struct iw_request_info *info,
9170 union iwreq_data *wrqu, char *extra)
9171 {
9172 struct ipw_priv *priv = libipw_priv(dev);
9173
9174 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9175 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9176 return -E2BIG;
9177 mutex_lock(&priv->mutex);
9178 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9179 memset(priv->nick, 0, sizeof(priv->nick));
9180 memcpy(priv->nick, extra, wrqu->data.length);
9181 IPW_DEBUG_TRACE("<<\n");
9182 mutex_unlock(&priv->mutex);
9183 return 0;
9184
9185 }
9186
9187 static int ipw_wx_get_nick(struct net_device *dev,
9188 struct iw_request_info *info,
9189 union iwreq_data *wrqu, char *extra)
9190 {
9191 struct ipw_priv *priv = libipw_priv(dev);
9192 IPW_DEBUG_WX("Getting nick\n");
9193 mutex_lock(&priv->mutex);
9194 wrqu->data.length = strlen(priv->nick);
9195 memcpy(extra, priv->nick, wrqu->data.length);
9196 wrqu->data.flags = 1; /* active */
9197 mutex_unlock(&priv->mutex);
9198 return 0;
9199 }
9200
9201 static int ipw_wx_set_sens(struct net_device *dev,
9202 struct iw_request_info *info,
9203 union iwreq_data *wrqu, char *extra)
9204 {
9205 struct ipw_priv *priv = libipw_priv(dev);
9206 int err = 0;
9207
9208 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9209 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9210 mutex_lock(&priv->mutex);
9211
9212 if (wrqu->sens.fixed == 0)
9213 {
9214 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9215 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9216 goto out;
9217 }
9218 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9219 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9220 err = -EINVAL;
9221 goto out;
9222 }
9223
9224 priv->roaming_threshold = wrqu->sens.value;
9225 priv->disassociate_threshold = 3*wrqu->sens.value;
9226 out:
9227 mutex_unlock(&priv->mutex);
9228 return err;
9229 }
9230
9231 static int ipw_wx_get_sens(struct net_device *dev,
9232 struct iw_request_info *info,
9233 union iwreq_data *wrqu, char *extra)
9234 {
9235 struct ipw_priv *priv = libipw_priv(dev);
9236 mutex_lock(&priv->mutex);
9237 wrqu->sens.fixed = 1;
9238 wrqu->sens.value = priv->roaming_threshold;
9239 mutex_unlock(&priv->mutex);
9240
9241 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9242 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9243
9244 return 0;
9245 }
9246
9247 static int ipw_wx_set_rate(struct net_device *dev,
9248 struct iw_request_info *info,
9249 union iwreq_data *wrqu, char *extra)
9250 {
9251 /* TODO: We should use semaphores or locks for access to priv */
9252 struct ipw_priv *priv = libipw_priv(dev);
9253 u32 target_rate = wrqu->bitrate.value;
9254 u32 fixed, mask;
9255
9256 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9257 /* value = X, fixed = 1 means only rate X */
9258 /* value = X, fixed = 0 means all rates lower than or equal to X */
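/* Worked example: value = 11000000 with fixed = 0 selects every supported
 * rate up to 11 Mb/s (CCK 1/2/5.5/11 plus OFDM 6/9). */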
9259
9260 if (target_rate == -1) {
9261 fixed = 0;
9262 mask = LIBIPW_DEFAULT_RATES_MASK;
9263 /* Now we should reassociate */
9264 goto apply;
9265 }
9266
9267 mask = 0;
9268 fixed = wrqu->bitrate.fixed;
9269
9270 if (target_rate == 1000000 || !fixed)
9271 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9272 if (target_rate == 1000000)
9273 goto apply;
9274
9275 if (target_rate == 2000000 || !fixed)
9276 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9277 if (target_rate == 2000000)
9278 goto apply;
9279
9280 if (target_rate == 5500000 || !fixed)
9281 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9282 if (target_rate == 5500000)
9283 goto apply;
9284
9285 if (target_rate == 6000000 || !fixed)
9286 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9287 if (target_rate == 6000000)
9288 goto apply;
9289
9290 if (target_rate == 9000000 || !fixed)
9291 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9292 if (target_rate == 9000000)
9293 goto apply;
9294
9295 if (target_rate == 11000000 || !fixed)
9296 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9297 if (target_rate == 11000000)
9298 goto apply;
9299
9300 if (target_rate == 12000000 || !fixed)
9301 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9302 if (target_rate == 12000000)
9303 goto apply;
9304
9305 if (target_rate == 18000000 || !fixed)
9306 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9307 if (target_rate == 18000000)
9308 goto apply;
9309
9310 if (target_rate == 24000000 || !fixed)
9311 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9312 if (target_rate == 24000000)
9313 goto apply;
9314
9315 if (target_rate == 36000000 || !fixed)
9316 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9317 if (target_rate == 36000000)
9318 goto apply;
9319
9320 if (target_rate == 48000000 || !fixed)
9321 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9322 if (target_rate == 48000000)
9323 goto apply;
9324
9325 if (target_rate == 54000000 || !fixed)
9326 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9327 if (target_rate == 54000000)
9328 goto apply;
9329
9330 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9331 return -EINVAL;
9332
9333 apply:
9334 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9335 mask, fixed ? "fixed" : "sub-rates");
9336 mutex_lock(&priv->mutex);
9337 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9338 priv->config &= ~CFG_FIXED_RATE;
9339 ipw_set_fixed_rate(priv, priv->ieee->mode);
9340 } else
9341 priv->config |= CFG_FIXED_RATE;
9342
9343 if (priv->rates_mask == mask) {
9344 IPW_DEBUG_WX("Mask set to current mask.\n");
9345 mutex_unlock(&priv->mutex);
9346 return 0;
9347 }
9348
9349 priv->rates_mask = mask;
9350
9351 /* Network configuration changed -- force [re]association */
9352 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9353 if (!ipw_disassociate(priv))
9354 ipw_associate(priv);
9355
9356 mutex_unlock(&priv->mutex);
9357 return 0;
9358 }
9359
9360 static int ipw_wx_get_rate(struct net_device *dev,
9361 struct iw_request_info *info,
9362 union iwreq_data *wrqu, char *extra)
9363 {
9364 struct ipw_priv *priv = libipw_priv(dev);
9365 mutex_lock(&priv->mutex);
9366 wrqu->bitrate.value = priv->last_rate;
9367 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9368 mutex_unlock(&priv->mutex);
9369 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9370 return 0;
9371 }
9372
9373 static int ipw_wx_set_rts(struct net_device *dev,
9374 struct iw_request_info *info,
9375 union iwreq_data *wrqu, char *extra)
9376 {
9377 struct ipw_priv *priv = libipw_priv(dev);
9378 mutex_lock(&priv->mutex);
9379 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9380 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9381 else {
9382 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9383 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9384 mutex_unlock(&priv->mutex);
9385 return -EINVAL;
9386 }
9387 priv->rts_threshold = wrqu->rts.value;
9388 }
9389
9390 ipw_send_rts_threshold(priv, priv->rts_threshold);
9391 mutex_unlock(&priv->mutex);
9392 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9393 return 0;
9394 }
9395
9396 static int ipw_wx_get_rts(struct net_device *dev,
9397 struct iw_request_info *info,
9398 union iwreq_data *wrqu, char *extra)
9399 {
9400 struct ipw_priv *priv = libipw_priv(dev);
9401 mutex_lock(&priv->mutex);
9402 wrqu->rts.value = priv->rts_threshold;
9403 wrqu->rts.fixed = 0; /* no auto select */
9404 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9405 mutex_unlock(&priv->mutex);
9406 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9407 return 0;
9408 }
9409
9410 static int ipw_wx_set_txpow(struct net_device *dev,
9411 struct iw_request_info *info,
9412 union iwreq_data *wrqu, char *extra)
9413 {
9414 struct ipw_priv *priv = libipw_priv(dev);
9415 int err = 0;
9416
9417 mutex_lock(&priv->mutex);
9418 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9419 err = -EINPROGRESS;
9420 goto out;
9421 }
9422
9423 if (!wrqu->power.fixed)
9424 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9425
9426 if (wrqu->power.flags != IW_TXPOW_DBM) {
9427 err = -EINVAL;
9428 goto out;
9429 }
9430
9431 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9432 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9433 err = -EINVAL;
9434 goto out;
9435 }
9436
9437 priv->tx_power = wrqu->power.value;
9438 err = ipw_set_tx_power(priv);
9439 out:
9440 mutex_unlock(&priv->mutex);
9441 return err;
9442 }
9443
9444 static int ipw_wx_get_txpow(struct net_device *dev,
9445 struct iw_request_info *info,
9446 union iwreq_data *wrqu, char *extra)
9447 {
9448 struct ipw_priv *priv = libipw_priv(dev);
9449 mutex_lock(&priv->mutex);
9450 wrqu->power.value = priv->tx_power;
9451 wrqu->power.fixed = 1;
9452 wrqu->power.flags = IW_TXPOW_DBM;
9453 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9454 mutex_unlock(&priv->mutex);
9455
9456 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9457 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9458
9459 return 0;
9460 }
9461
9462 static int ipw_wx_set_frag(struct net_device *dev,
9463 struct iw_request_info *info,
9464 union iwreq_data *wrqu, char *extra)
9465 {
9466 struct ipw_priv *priv = libipw_priv(dev);
9467 mutex_lock(&priv->mutex);
9468 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9469 priv->ieee->fts = DEFAULT_FTS;
9470 else {
9471 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9472 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9473 mutex_unlock(&priv->mutex);
9474 return -EINVAL;
9475 }
9476
9477 priv->ieee->fts = wrqu->frag.value & ~0x1;
9478 }
9479
9480 ipw_send_frag_threshold(priv, wrqu->frag.value);
9481 mutex_unlock(&priv->mutex);
9482 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9483 return 0;
9484 }
9485
9486 static int ipw_wx_get_frag(struct net_device *dev,
9487 struct iw_request_info *info,
9488 union iwreq_data *wrqu, char *extra)
9489 {
9490 struct ipw_priv *priv = libipw_priv(dev);
9491 mutex_lock(&priv->mutex);
9492 wrqu->frag.value = priv->ieee->fts;
9493 wrqu->frag.fixed = 0; /* no auto select */
9494 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9495 mutex_unlock(&priv->mutex);
9496 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9497
9498 return 0;
9499 }
9500
9501 static int ipw_wx_set_retry(struct net_device *dev,
9502 struct iw_request_info *info,
9503 union iwreq_data *wrqu, char *extra)
9504 {
9505 struct ipw_priv *priv = libipw_priv(dev);
9506
9507 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9508 return -EINVAL;
9509
9510 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9511 return 0;
9512
9513 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9514 return -EINVAL;
9515
9516 mutex_lock(&priv->mutex);
9517 if (wrqu->retry.flags & IW_RETRY_SHORT)
9518 priv->short_retry_limit = (u8) wrqu->retry.value;
9519 else if (wrqu->retry.flags & IW_RETRY_LONG)
9520 priv->long_retry_limit = (u8) wrqu->retry.value;
9521 else {
9522 priv->short_retry_limit = (u8) wrqu->retry.value;
9523 priv->long_retry_limit = (u8) wrqu->retry.value;
9524 }
9525
9526 ipw_send_retry_limit(priv, priv->short_retry_limit,
9527 priv->long_retry_limit);
9528 mutex_unlock(&priv->mutex);
9529 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9530 priv->short_retry_limit, priv->long_retry_limit);
9531 return 0;
9532 }
9533
9534 static int ipw_wx_get_retry(struct net_device *dev,
9535 struct iw_request_info *info,
9536 union iwreq_data *wrqu, char *extra)
9537 {
9538 struct ipw_priv *priv = libipw_priv(dev);
9539
9540 mutex_lock(&priv->mutex);
9541 wrqu->retry.disabled = 0;
9542
9543 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9544 mutex_unlock(&priv->mutex);
9545 return -EINVAL;
9546 }
9547
9548 if (wrqu->retry.flags & IW_RETRY_LONG) {
9549 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9550 wrqu->retry.value = priv->long_retry_limit;
9551 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9552 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9553 wrqu->retry.value = priv->short_retry_limit;
9554 } else {
9555 wrqu->retry.flags = IW_RETRY_LIMIT;
9556 wrqu->retry.value = priv->short_retry_limit;
9557 }
9558 mutex_unlock(&priv->mutex);
9559
9560 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9561
9562 return 0;
9563 }
9564
9565 static int ipw_wx_set_scan(struct net_device *dev,
9566 struct iw_request_info *info,
9567 union iwreq_data *wrqu, char *extra)
9568 {
9569 struct ipw_priv *priv = libipw_priv(dev);
9570 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9571 struct delayed_work *work = NULL;
9572
9573 mutex_lock(&priv->mutex);
9574
9575 priv->user_requested_scan = 1;
9576
9577 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9578 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9579 int len = min((int)req->essid_len,
9580 (int)sizeof(priv->direct_scan_ssid));
9581 memcpy(priv->direct_scan_ssid, req->essid, len);
9582 priv->direct_scan_ssid_len = len;
9583 work = &priv->request_direct_scan;
9584 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9585 work = &priv->request_passive_scan;
9586 }
9587 } else {
9588 /* Normal active broadcast scan */
9589 work = &priv->request_scan;
9590 }
9591
9592 mutex_unlock(&priv->mutex);
9593
9594 IPW_DEBUG_WX("Start scan\n");
9595
9596 schedule_delayed_work(work, 0);
9597
9598 return 0;
9599 }
9600
9601 static int ipw_wx_get_scan(struct net_device *dev,
9602 struct iw_request_info *info,
9603 union iwreq_data *wrqu, char *extra)
9604 {
9605 struct ipw_priv *priv = libipw_priv(dev);
9606 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9607 }
9608
9609 static int ipw_wx_set_encode(struct net_device *dev,
9610 struct iw_request_info *info,
9611 union iwreq_data *wrqu, char *key)
9612 {
9613 struct ipw_priv *priv = libipw_priv(dev);
9614 int ret;
9615 u32 cap = priv->capability;
9616
9617 mutex_lock(&priv->mutex);
9618 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9619
9620 /* In IBSS mode, we need to notify the firmware to update
9621 * the beacon info after we change the capability. */
9622 if (cap != priv->capability &&
9623 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9624 priv->status & STATUS_ASSOCIATED)
9625 ipw_disassociate(priv);
9626
9627 mutex_unlock(&priv->mutex);
9628 return ret;
9629 }
9630
9631 static int ipw_wx_get_encode(struct net_device *dev,
9632 struct iw_request_info *info,
9633 union iwreq_data *wrqu, char *key)
9634 {
9635 struct ipw_priv *priv = libipw_priv(dev);
9636 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9637 }
9638
9639 static int ipw_wx_set_power(struct net_device *dev,
9640 struct iw_request_info *info,
9641 union iwreq_data *wrqu, char *extra)
9642 {
9643 struct ipw_priv *priv = libipw_priv(dev);
9644 int err;
9645 mutex_lock(&priv->mutex);
9646 if (wrqu->power.disabled) {
9647 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9648 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9649 if (err) {
9650 IPW_DEBUG_WX("failed setting power mode.\n");
9651 mutex_unlock(&priv->mutex);
9652 return err;
9653 }
9654 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9655 mutex_unlock(&priv->mutex);
9656 return 0;
9657 }
9658
9659 switch (wrqu->power.flags & IW_POWER_MODE) {
9660 case IW_POWER_ON: /* If no mode specified */
9661 case IW_POWER_MODE: /* If the full mode mask is set */
9662 case IW_POWER_ALL_R: /* If all receive modes are explicitly requested */
9663 break;
9664 default: /* Otherwise we don't support it */
9665 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9666 wrqu->power.flags);
9667 mutex_unlock(&priv->mutex);
9668 return -EOPNOTSUPP;
9669 }
9670
9671 /* If the user hasn't specified a power management mode yet, default
9672 * to BATTERY */
9673 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9674 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9675 else
9676 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9677
9678 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9679 if (err) {
9680 IPW_DEBUG_WX("failed setting power mode.\n");
9681 mutex_unlock(&priv->mutex);
9682 return err;
9683 }
9684
9685 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9686 mutex_unlock(&priv->mutex);
9687 return 0;
9688 }
9689
9690 static int ipw_wx_get_power(struct net_device *dev,
9691 struct iw_request_info *info,
9692 union iwreq_data *wrqu, char *extra)
9693 {
9694 struct ipw_priv *priv = libipw_priv(dev);
9695 mutex_lock(&priv->mutex);
9696 if (!(priv->power_mode & IPW_POWER_ENABLED))
9697 wrqu->power.disabled = 1;
9698 else
9699 wrqu->power.disabled = 0;
9700
9701 mutex_unlock(&priv->mutex);
9702 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9703
9704 return 0;
9705 }
9706
9707 static int ipw_wx_set_powermode(struct net_device *dev,
9708 struct iw_request_info *info,
9709 union iwreq_data *wrqu, char *extra)
9710 {
9711 struct ipw_priv *priv = libipw_priv(dev);
9712 int mode = *(int *)extra;
9713 int err;
9714
9715 mutex_lock(&priv->mutex);
9716 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9717 mode = IPW_POWER_AC;
9718
9719 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9720 err = ipw_send_power_mode(priv, mode);
9721 if (err) {
9722 IPW_DEBUG_WX("failed setting power mode.\n");
9723 mutex_unlock(&priv->mutex);
9724 return err;
9725 }
9726 priv->power_mode = IPW_POWER_ENABLED | mode;
9727 }
9728 mutex_unlock(&priv->mutex);
9729 return 0;
9730 }
9731
9732 #define MAX_WX_STRING 80
9733 static int ipw_wx_get_powermode(struct net_device *dev,
9734 struct iw_request_info *info,
9735 union iwreq_data *wrqu, char *extra)
9736 {
9737 struct ipw_priv *priv = libipw_priv(dev);
9738 int level = IPW_POWER_LEVEL(priv->power_mode);
9739 char *p = extra;
9740
9741 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9742
9743 switch (level) {
9744 case IPW_POWER_AC:
9745 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9746 break;
9747 case IPW_POWER_BATTERY:
9748 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9749 break;
9750 default:
9751 p += snprintf(p, MAX_WX_STRING - (p - extra),
9752 "(Timeout %dms, Period %dms)",
9753 timeout_duration[level - 1] / 1000,
9754 period_duration[level - 1] / 1000);
9755 }
9756
9757 if (!(priv->power_mode & IPW_POWER_ENABLED))
9758 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9759
9760 wrqu->data.length = p - extra + 1;
9761
9762 return 0;
9763 }
9764
9765 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9766 struct iw_request_info *info,
9767 union iwreq_data *wrqu, char *extra)
9768 {
9769 struct ipw_priv *priv = libipw_priv(dev);
9770 int mode = *(int *)extra;
9771 u8 band = 0, modulation = 0;
9772
9773 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9774 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9775 return -EINVAL;
9776 }
9777 mutex_lock(&priv->mutex);
9778 if (priv->adapter == IPW_2915ABG) {
9779 priv->ieee->abg_true = 1;
9780 if (mode & IEEE_A) {
9781 band |= LIBIPW_52GHZ_BAND;
9782 modulation |= LIBIPW_OFDM_MODULATION;
9783 } else
9784 priv->ieee->abg_true = 0;
9785 } else {
9786 if (mode & IEEE_A) {
9787 IPW_WARNING("Attempt to set 2200BG into "
9788 "802.11a mode\n");
9789 mutex_unlock(&priv->mutex);
9790 return -EINVAL;
9791 }
9792
9793 priv->ieee->abg_true = 0;
9794 }
9795
9796 if (mode & IEEE_B) {
9797 band |= LIBIPW_24GHZ_BAND;
9798 modulation |= LIBIPW_CCK_MODULATION;
9799 } else
9800 priv->ieee->abg_true = 0;
9801
9802 if (mode & IEEE_G) {
9803 band |= LIBIPW_24GHZ_BAND;
9804 modulation |= LIBIPW_OFDM_MODULATION;
9805 } else
9806 priv->ieee->abg_true = 0;
9807
9808 priv->ieee->mode = mode;
9809 priv->ieee->freq_band = band;
9810 priv->ieee->modulation = modulation;
9811 init_supported_rates(priv, &priv->rates);
9812
9813 /* Network configuration changed -- force [re]association */
9814 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9815 if (!ipw_disassociate(priv)) {
9816 ipw_send_supported_rates(priv, &priv->rates);
9817 ipw_associate(priv);
9818 }
9819
9820 /* Update the band LEDs */
9821 ipw_led_band_on(priv);
9822
9823 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9824 mode & IEEE_A ? 'a' : '.',
9825 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9826 mutex_unlock(&priv->mutex);
9827 return 0;
9828 }
9829
9830 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9831 struct iw_request_info *info,
9832 union iwreq_data *wrqu, char *extra)
9833 {
9834 struct ipw_priv *priv = libipw_priv(dev);
9835 mutex_lock(&priv->mutex);
9836 switch (priv->ieee->mode) {
9837 case IEEE_A:
9838 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9839 break;
9840 case IEEE_B:
9841 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9842 break;
9843 case IEEE_A | IEEE_B:
9844 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9845 break;
9846 case IEEE_G:
9847 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9848 break;
9849 case IEEE_A | IEEE_G:
9850 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9851 break;
9852 case IEEE_B | IEEE_G:
9853 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9854 break;
9855 case IEEE_A | IEEE_B | IEEE_G:
9856 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9857 break;
9858 default:
9859 strncpy(extra, "unknown", MAX_WX_STRING);
9860 break;
9861 }
9862
9863 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9864
9865 wrqu->data.length = strlen(extra) + 1;
9866 mutex_unlock(&priv->mutex);
9867
9868 return 0;
9869 }
9870
9871 static int ipw_wx_set_preamble(struct net_device *dev,
9872 struct iw_request_info *info,
9873 union iwreq_data *wrqu, char *extra)
9874 {
9875 struct ipw_priv *priv = libipw_priv(dev);
9876 int mode = *(int *)extra;
9877 mutex_lock(&priv->mutex);
9878 /* Switching from SHORT -> LONG requires a disassociation */
9879 if (mode == 1) {
9880 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9881 priv->config |= CFG_PREAMBLE_LONG;
9882
9883 /* Network configuration changed -- force [re]association */
9884 IPW_DEBUG_ASSOC
9885 ("[re]association triggered due to preamble change.\n");
9886 if (!ipw_disassociate(priv))
9887 ipw_associate(priv);
9888 }
9889 goto done;
9890 }
9891
9892 if (mode == 0) {
9893 priv->config &= ~CFG_PREAMBLE_LONG;
9894 goto done;
9895 }
9896 mutex_unlock(&priv->mutex);
9897 return -EINVAL;
9898
9899 done:
9900 mutex_unlock(&priv->mutex);
9901 return 0;
9902 }
9903
9904 static int ipw_wx_get_preamble(struct net_device *dev,
9905 struct iw_request_info *info,
9906 union iwreq_data *wrqu, char *extra)
9907 {
9908 struct ipw_priv *priv = libipw_priv(dev);
9909 mutex_lock(&priv->mutex);
9910 if (priv->config & CFG_PREAMBLE_LONG)
9911 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9912 else
9913 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9914 mutex_unlock(&priv->mutex);
9915 return 0;
9916 }
9917
9918 #ifdef CONFIG_IPW2200_MONITOR
9919 static int ipw_wx_set_monitor(struct net_device *dev,
9920 struct iw_request_info *info,
9921 union iwreq_data *wrqu, char *extra)
9922 {
9923 struct ipw_priv *priv = libipw_priv(dev);
9924 int *parms = (int *)extra;
9925 int enable = (parms[0] > 0);
9926 mutex_lock(&priv->mutex);
9927 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9928 if (enable) {
9929 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9930 #ifdef CONFIG_IPW2200_RADIOTAP
9931 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9932 #else
9933 priv->net_dev->type = ARPHRD_IEEE80211;
9934 #endif
9935 schedule_work(&priv->adapter_restart);
9936 }
9937
9938 ipw_set_channel(priv, parms[1]);
9939 } else {
9940 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9941 mutex_unlock(&priv->mutex);
9942 return 0;
9943 }
9944 priv->net_dev->type = ARPHRD_ETHER;
9945 schedule_work(&priv->adapter_restart);
9946 }
9947 mutex_unlock(&priv->mutex);
9948 return 0;
9949 }
9950
9951 #endif /* CONFIG_IPW2200_MONITOR */
9952
9953 static int ipw_wx_reset(struct net_device *dev,
9954 struct iw_request_info *info,
9955 union iwreq_data *wrqu, char *extra)
9956 {
9957 struct ipw_priv *priv = libipw_priv(dev);
9958 IPW_DEBUG_WX("RESET\n");
9959 schedule_work(&priv->adapter_restart);
9960 return 0;
9961 }
9962
9963 static int ipw_wx_sw_reset(struct net_device *dev,
9964 struct iw_request_info *info,
9965 union iwreq_data *wrqu, char *extra)
9966 {
9967 struct ipw_priv *priv = libipw_priv(dev);
9968 union iwreq_data wrqu_sec = {
9969 .encoding = {
9970 .flags = IW_ENCODE_DISABLED,
9971 },
9972 };
9973 int ret;
9974
9975 IPW_DEBUG_WX("SW_RESET\n");
9976
9977 mutex_lock(&priv->mutex);
9978
9979 ret = ipw_sw_reset(priv, 2);
9980 if (!ret) {
9981 free_firmware();
9982 ipw_adapter_restart(priv);
9983 }
9984
9985 /* The SW reset bit might have been toggled on by the 'disable'
9986 * module parameter, so take appropriate action */
9987 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9988
9989 mutex_unlock(&priv->mutex);
9990 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9991 mutex_lock(&priv->mutex);
9992
9993 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9994 /* Configuration likely changed -- force [re]association */
9995 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9996 "reset.\n");
9997 if (!ipw_disassociate(priv))
9998 ipw_associate(priv);
9999 }
10000
10001 mutex_unlock(&priv->mutex);
10002
10003 return 0;
10004 }
10005
10006 /* Rebase the WE IOCTLs to zero for the handler array */
10007 static iw_handler ipw_wx_handlers[] = {
10008 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10009 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10010 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10011 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10012 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10013 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10014 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10015 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10016 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10017 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10018 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10019 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10020 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10021 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10022 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10023 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10024 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10025 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10026 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10027 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10028 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10029 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10030 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10031 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10032 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10033 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10034 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10035 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10036 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10037 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10038 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10039 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10040 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10041 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10042 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10043 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10044 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10045 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10046 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10047 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10048 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10049 };
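/*
 * How the table above is indexed: IW_HANDLER() (include/net/iw_handler.h)
 * rebases each SIOCxIWyyy number to a zero-based slot, roughly
 *
 *	#define IW_HANDLER(id, func)	[(id) - SIOCIWFIRST] = (func)
 *
 * so the wireless-extensions core can dispatch a request by indexing
 * ipw_wx_handlers[cmd - SIOCIWFIRST] directly.
 */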
10050
10051 enum {
10052 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10053 IPW_PRIV_GET_POWER,
10054 IPW_PRIV_SET_MODE,
10055 IPW_PRIV_GET_MODE,
10056 IPW_PRIV_SET_PREAMBLE,
10057 IPW_PRIV_GET_PREAMBLE,
10058 IPW_PRIV_RESET,
10059 IPW_PRIV_SW_RESET,
10060 #ifdef CONFIG_IPW2200_MONITOR
10061 IPW_PRIV_SET_MONITOR,
10062 #endif
10063 };
10064
10065 static struct iw_priv_args ipw_priv_args[] = {
10066 {
10067 .cmd = IPW_PRIV_SET_POWER,
10068 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10069 .name = "set_power"},
10070 {
10071 .cmd = IPW_PRIV_GET_POWER,
10072 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10073 .name = "get_power"},
10074 {
10075 .cmd = IPW_PRIV_SET_MODE,
10076 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10077 .name = "set_mode"},
10078 {
10079 .cmd = IPW_PRIV_GET_MODE,
10080 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10081 .name = "get_mode"},
10082 {
10083 .cmd = IPW_PRIV_SET_PREAMBLE,
10084 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10085 .name = "set_preamble"},
10086 {
10087 .cmd = IPW_PRIV_GET_PREAMBLE,
10088 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10089 .name = "get_preamble"},
10090 {
10091 IPW_PRIV_RESET,
10092 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10093 {
10094 IPW_PRIV_SW_RESET,
10095 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10096 #ifdef CONFIG_IPW2200_MONITOR
10097 {
10098 IPW_PRIV_SET_MONITOR,
10099 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10100 #endif /* CONFIG_IPW2200_MONITOR */
10101 };
10102
10103 static iw_handler ipw_priv_handler[] = {
10104 ipw_wx_set_powermode,
10105 ipw_wx_get_powermode,
10106 ipw_wx_set_wireless_mode,
10107 ipw_wx_get_wireless_mode,
10108 ipw_wx_set_preamble,
10109 ipw_wx_get_preamble,
10110 ipw_wx_reset,
10111 ipw_wx_sw_reset,
10112 #ifdef CONFIG_IPW2200_MONITOR
10113 ipw_wx_set_monitor,
10114 #endif
10115 };
10116
10117 static struct iw_handler_def ipw_wx_handler_def = {
10118 .standard = ipw_wx_handlers,
10119 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10120 .num_private = ARRAY_SIZE(ipw_priv_handler),
10121 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10122 .private = ipw_priv_handler,
10123 .private_args = ipw_priv_args,
10124 .get_wireless_stats = ipw_get_wireless_stats,
10125 };
10126
10127 /*
10128 * Get wireless statistics.
10129 * Called by /proc/net/wireless
10130 * Also called by SIOCGIWSTATS
10131 */
10132 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10133 {
10134 struct ipw_priv *priv = libipw_priv(dev);
10135 struct iw_statistics *wstats;
10136
10137 wstats = &priv->wstats;
10138
10139 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10140 * netdev->get_wireless_stats seems to be called before fw is
10141 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10142 * and associated; if not associated, the values are all meaningless
10143 * anyway, so zero them all and mark them INVALID */
10144 if (!(priv->status & STATUS_ASSOCIATED)) {
10145 wstats->miss.beacon = 0;
10146 wstats->discard.retries = 0;
10147 wstats->qual.qual = 0;
10148 wstats->qual.level = 0;
10149 wstats->qual.noise = 0;
10150 wstats->qual.updated = 7;
10151 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10152 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10153 return wstats;
10154 }
10155
10156 wstats->qual.qual = priv->quality;
10157 wstats->qual.level = priv->exp_avg_rssi;
10158 wstats->qual.noise = priv->exp_avg_noise;
10159 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10160 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10161
10162 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10163 wstats->discard.retries = priv->last_tx_failures;
10164 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10165
10166 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10167 goto fail_get_ordinal;
10168 wstats->discard.retries += tx_retry; */
10169
10170 return wstats;
10171 }
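/*
 * A minimal sketch of how a consumer of these statistics is expected to
 * treat qual.updated as a bitmask (hypothetical caller, not driver code):
 *
 *	struct iw_statistics *ws = ipw_get_wireless_stats(dev);
 *
 *	if (!(ws->qual.updated & IW_QUAL_LEVEL_INVALID))
 *		pr_info("signal: %d dBm\n", (s8)ws->qual.level);
 *
 * IW_QUAL_DBM set above tells such a reader that level/noise are dBm
 * values stored in the unsigned fields.
 */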
10172
10173 /* net device stuff */
10174
10175 static void init_sys_config(struct ipw_sys_config *sys_config)
10176 {
10177 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10178 sys_config->bt_coexistence = 0;
10179 sys_config->answer_broadcast_ssid_probe = 0;
10180 sys_config->accept_all_data_frames = 0;
10181 sys_config->accept_non_directed_frames = 1;
10182 sys_config->exclude_unicast_unencrypted = 0;
10183 sys_config->disable_unicast_decryption = 1;
10184 sys_config->exclude_multicast_unencrypted = 0;
10185 sys_config->disable_multicast_decryption = 1;
10186 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10187 antenna = CFG_SYS_ANTENNA_BOTH;
10188 sys_config->antenna_diversity = antenna;
10189 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10190 sys_config->dot11g_auto_detection = 0;
10191 sys_config->enable_cts_to_self = 0;
10192 sys_config->bt_coexist_collision_thr = 0;
10193 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10194 sys_config->silence_threshold = 0x1e;
10195 }
10196
10197 static int ipw_net_open(struct net_device *dev)
10198 {
10199 IPW_DEBUG_INFO("dev->open\n");
10200 netif_start_queue(dev);
10201 return 0;
10202 }
10203
10204 static int ipw_net_stop(struct net_device *dev)
10205 {
10206 IPW_DEBUG_INFO("dev->close\n");
10207 netif_stop_queue(dev);
10208 return 0;
10209 }
10210
10211 /*
10212 TODO:
10213
10214 Modify to send one TFD per fragment instead of using chunking; otherwise
10215 we would need to heavily modify libipw_skb_to_txb().
10216 */
10217
10218 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10219 int pri)
10220 {
10221 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10222 txb->fragments[0]->data;
10223 int i = 0;
10224 struct tfd_frame *tfd;
10225 #ifdef CONFIG_IPW2200_QOS
10226 int tx_id = ipw_get_tx_queue_number(priv, pri);
10227 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10228 #else
10229 struct clx2_tx_queue *txq = &priv->txq[0];
10230 #endif
10231 struct clx2_queue *q = &txq->q;
10232 u8 id, hdr_len, unicast;
10233 int fc;
10234
10235 if (!(priv->status & STATUS_ASSOCIATED))
10236 goto drop;
10237
10238 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10239 switch (priv->ieee->iw_mode) {
10240 case IW_MODE_ADHOC:
10241 unicast = !is_multicast_ether_addr(hdr->addr1);
10242 id = ipw_find_station(priv, hdr->addr1);
10243 if (id == IPW_INVALID_STATION) {
10244 id = ipw_add_station(priv, hdr->addr1);
10245 if (id == IPW_INVALID_STATION) {
10246 IPW_WARNING("Attempt to send data to "
10247 "invalid cell: %pM\n",
10248 hdr->addr1);
10249 goto drop;
10250 }
10251 }
10252 break;
10253
10254 case IW_MODE_INFRA:
10255 default:
10256 unicast = !is_multicast_ether_addr(hdr->addr3);
10257 id = 0;
10258 break;
10259 }
10260
10261 tfd = &txq->bd[q->first_empty];
10262 txq->txb[q->first_empty] = txb;
10263 memset(tfd, 0, sizeof(*tfd));
10264 tfd->u.data.station_number = id;
10265
10266 tfd->control_flags.message_type = TX_FRAME_TYPE;
10267 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10268
10269 tfd->u.data.cmd_id = DINO_CMD_TX;
10270 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10271
10272 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10273 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10274 else
10275 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10276
10277 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10278 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10279
10280 fc = le16_to_cpu(hdr->frame_ctl);
10281 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10282
10283 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10284
10285 if (likely(unicast))
10286 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10287
10288 if (txb->encrypted && !priv->ieee->host_encrypt) {
10289 switch (priv->ieee->sec.level) {
10290 case SEC_LEVEL_3:
10291 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10292 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10293 /* XXX: ACK flag must be set for CCMP even if it
10294 * is a multicast/broadcast packet, because CCMP
10295 * group communication encrypted by GTK is
10296 * actually done by the AP. */
10297 if (!unicast)
10298 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10299
10300 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10301 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10302 tfd->u.data.key_index = 0;
10303 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10304 break;
10305 case SEC_LEVEL_2:
10306 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10307 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10308 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10309 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10310 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10311 break;
10312 case SEC_LEVEL_1:
10313 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10314 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10315 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10316 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10317 40)
10318 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10319 else
10320 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10321 break;
10322 case SEC_LEVEL_0:
10323 break;
10324 default:
10325 printk(KERN_ERR "Unknown security level %d\n",
10326 priv->ieee->sec.level);
10327 break;
10328 }
10329 } else
10330 /* No hardware encryption */
10331 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10332
10333 #ifdef CONFIG_IPW2200_QOS
10334 if (fc & IEEE80211_STYPE_QOS_DATA)
10335 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10336 #endif /* CONFIG_IPW2200_QOS */
10337
10338 /* payload */
10339 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10340 txb->nr_frags));
10341 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10342 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10343 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10344 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10345 i, le32_to_cpu(tfd->u.data.num_chunks),
10346 txb->fragments[i]->len - hdr_len);
10347 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10348 i, tfd->u.data.num_chunks,
10349 txb->fragments[i]->len - hdr_len);
10350 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10351 txb->fragments[i]->len - hdr_len);
10352
10353 tfd->u.data.chunk_ptr[i] =
10354 cpu_to_le32(pci_map_single
10355 (priv->pci_dev,
10356 txb->fragments[i]->data + hdr_len,
10357 txb->fragments[i]->len - hdr_len,
10358 PCI_DMA_TODEVICE));
10359 tfd->u.data.chunk_len[i] =
10360 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10361 }
10362
10363 if (i != txb->nr_frags) {
10364 struct sk_buff *skb;
10365 u16 remaining_bytes = 0;
10366 int j;
10367
10368 for (j = i; j < txb->nr_frags; j++)
10369 remaining_bytes += txb->fragments[j]->len - hdr_len;
10370
10371 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10372 remaining_bytes);
10373 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10374 if (skb != NULL) {
10375 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10376 for (j = i; j < txb->nr_frags; j++) {
10377 int size = txb->fragments[j]->len - hdr_len;
10378
10379 printk(KERN_INFO "Adding frag %d %d...\n",
10380 j, size);
10381 memcpy(skb_put(skb, size),
10382 txb->fragments[j]->data + hdr_len, size);
10383 }
10384 dev_kfree_skb_any(txb->fragments[i]);
10385 txb->fragments[i] = skb;
10386 tfd->u.data.chunk_ptr[i] =
10387 cpu_to_le32(pci_map_single
10388 (priv->pci_dev, skb->data,
10389 remaining_bytes,
10390 PCI_DMA_TODEVICE));
10391
10392 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10393 }
10394 }
10395
10396 /* kick DMA */
10397 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10398 ipw_write32(priv, q->reg_w, q->first_empty);
10399
10400 if (ipw_tx_queue_space(q) < q->high_mark)
10401 netif_stop_queue(priv->net_dev);
10402
10403 return NETDEV_TX_OK;
10404
10405 drop:
10406 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10407 libipw_txb_free(txb);
10408 return NETDEV_TX_OK;
10409 }
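/*
 * Summary of the chunking above: at most NUM_TFD_CHUNKS - 2 fragments are
 * DMA-mapped individually; if the txb carries more, the remaining
 * fragments are copied into one freshly allocated skb, mapped as a single
 * extra chunk, and num_chunks is bumped by one.  The TODO above describes
 * the eventual goal of one TFD per fragment instead.
 */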
10410
10411 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10412 {
10413 struct ipw_priv *priv = libipw_priv(dev);
10414 #ifdef CONFIG_IPW2200_QOS
10415 int tx_id = ipw_get_tx_queue_number(priv, pri);
10416 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10417 #else
10418 struct clx2_tx_queue *txq = &priv->txq[0];
10419 #endif /* CONFIG_IPW2200_QOS */
10420
10421 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10422 return 1;
10423
10424 return 0;
10425 }
10426
10427 #ifdef CONFIG_IPW2200_PROMISCUOUS
10428 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10429 struct libipw_txb *txb)
10430 {
10431 struct libipw_rx_stats dummystats;
10432 struct ieee80211_hdr *hdr;
10433 u8 n;
10434 u16 filter = priv->prom_priv->filter;
10435 int hdr_only = 0;
10436
10437 if (filter & IPW_PROM_NO_TX)
10438 return;
10439
10440 memset(&dummystats, 0, sizeof(dummystats));
10441
10442 /* Filtering of fragment chains is done against the first fragment */
10443 hdr = (void *)txb->fragments[0]->data;
10444 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10445 if (filter & IPW_PROM_NO_MGMT)
10446 return;
10447 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10448 hdr_only = 1;
10449 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10450 if (filter & IPW_PROM_NO_CTL)
10451 return;
10452 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10453 hdr_only = 1;
10454 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10455 if (filter & IPW_PROM_NO_DATA)
10456 return;
10457 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10458 hdr_only = 1;
10459 }
10460
10461 for (n = 0; n < txb->nr_frags; ++n) {
10462 struct sk_buff *src = txb->fragments[n];
10463 struct sk_buff *dst;
10464 struct ieee80211_radiotap_header *rt_hdr;
10465 int len;
10466
10467 if (hdr_only) {
10468 hdr = (void *)src->data;
10469 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10470 } else
10471 len = src->len;
10472
10473 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10474 if (!dst)
10475 continue;
10476
10477 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10478
10479 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10480 rt_hdr->it_pad = 0;
10481 rt_hdr->it_present = 0; /* after all, it's just an idea */
10482 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10483
10484 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10485 ieee80211chan2mhz(priv->channel));
10486 if (priv->channel > 14) /* 802.11a */
10487 *(__le16*)skb_put(dst, sizeof(u16)) =
10488 cpu_to_le16(IEEE80211_CHAN_OFDM |
10489 IEEE80211_CHAN_5GHZ);
10490 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10491 *(__le16*)skb_put(dst, sizeof(u16)) =
10492 cpu_to_le16(IEEE80211_CHAN_CCK |
10493 IEEE80211_CHAN_2GHZ);
10494 else /* 802.11g */
10495 *(__le16*)skb_put(dst, sizeof(u16)) =
10496 cpu_to_le16(IEEE80211_CHAN_OFDM |
10497 IEEE80211_CHAN_2GHZ);
10498
10499 rt_hdr->it_len = cpu_to_le16(dst->len);
10500
10501 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10502
10503 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10504 dev_kfree_skb_any(dst);
10505 }
10506 }
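/*
 * Layout of the duplicated frames handed to the rtap interface above
 * (sketch of the bytes placed in each dst skb):
 *
 *	struct ieee80211_radiotap_header   it_present advertises only CHANNEL
 *	__le16 frequency (MHz)             ieee80211chan2mhz(priv->channel)
 *	__le16 channel flags               CCK/OFDM plus 2GHZ/5GHZ
 *	802.11 frame                       full frame, or header only when a
 *	                                   *_HEADER_ONLY filter bit is set
 *
 * it_len is fixed up to cover the radiotap header plus the two channel
 * words, i.e. everything that precedes the copied frame.
 */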
10507 #endif
10508
10509 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10510 struct net_device *dev, int pri)
10511 {
10512 struct ipw_priv *priv = libipw_priv(dev);
10513 unsigned long flags;
10514 netdev_tx_t ret;
10515
10516 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10517 spin_lock_irqsave(&priv->lock, flags);
10518
10519 #ifdef CONFIG_IPW2200_PROMISCUOUS
10520 if (rtap_iface && netif_running(priv->prom_net_dev))
10521 ipw_handle_promiscuous_tx(priv, txb);
10522 #endif
10523
10524 ret = ipw_tx_skb(priv, txb, pri);
10525 if (ret == NETDEV_TX_OK)
10526 __ipw_led_activity_on(priv);
10527 spin_unlock_irqrestore(&priv->lock, flags);
10528
10529 return ret;
10530 }
10531
10532 static void ipw_net_set_multicast_list(struct net_device *dev)
10533 {
10534
10535 }
10536
10537 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10538 {
10539 struct ipw_priv *priv = libipw_priv(dev);
10540 struct sockaddr *addr = p;
10541
10542 if (!is_valid_ether_addr(addr->sa_data))
10543 return -EADDRNOTAVAIL;
10544 mutex_lock(&priv->mutex);
10545 priv->config |= CFG_CUSTOM_MAC;
10546 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10547 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10548 priv->net_dev->name, priv->mac_addr);
10549 schedule_work(&priv->adapter_restart);
10550 mutex_unlock(&priv->mutex);
10551 return 0;
10552 }
10553
10554 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10555 struct ethtool_drvinfo *info)
10556 {
10557 struct ipw_priv *p = libipw_priv(dev);
10558 char vers[64];
10559 char date[32];
10560 u32 len;
10561
10562 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10563 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10564
10565 len = sizeof(vers);
10566 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10567 len = sizeof(date);
10568 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10569
10570 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10571 vers, date);
10572 strlcpy(info->bus_info, pci_name(p->pci_dev),
10573 sizeof(info->bus_info));
10574 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10575 }
10576
10577 static u32 ipw_ethtool_get_link(struct net_device *dev)
10578 {
10579 struct ipw_priv *priv = libipw_priv(dev);
10580 return (priv->status & STATUS_ASSOCIATED) != 0;
10581 }
10582
10583 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10584 {
10585 return IPW_EEPROM_IMAGE_SIZE;
10586 }
10587
10588 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10589 struct ethtool_eeprom *eeprom, u8 * bytes)
10590 {
10591 struct ipw_priv *p = libipw_priv(dev);
10592
10593 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10594 return -EINVAL;
10595 mutex_lock(&p->mutex);
10596 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10597 mutex_unlock(&p->mutex);
10598 return 0;
10599 }
10600
10601 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10602 struct ethtool_eeprom *eeprom, u8 * bytes)
10603 {
10604 struct ipw_priv *p = libipw_priv(dev);
10605 int i;
10606
10607 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10608 return -EINVAL;
10609 mutex_lock(&p->mutex);
10610 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10611 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10612 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10613 mutex_unlock(&p->mutex);
10614 return 0;
10615 }
10616
10617 static const struct ethtool_ops ipw_ethtool_ops = {
10618 .get_link = ipw_ethtool_get_link,
10619 .get_drvinfo = ipw_ethtool_get_drvinfo,
10620 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10621 .get_eeprom = ipw_ethtool_get_eeprom,
10622 .set_eeprom = ipw_ethtool_set_eeprom,
10623 };
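/*
 * Usage note: with .get_eeprom/.set_eeprom wired up as above, the cached
 * EEPROM image is reachable through the standard ethtool EEPROM commands
 * (ETHTOOL_GEEPROM/ETHTOOL_SEEPROM), e.g. `ethtool -e <iface>` from user
 * space ends up in ipw_ethtool_get_eeprom().
 */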
10624
10625 static irqreturn_t ipw_isr(int irq, void *data)
10626 {
10627 struct ipw_priv *priv = data;
10628 u32 inta, inta_mask;
10629
10630 if (!priv)
10631 return IRQ_NONE;
10632
10633 spin_lock(&priv->irq_lock);
10634
10635 if (!(priv->status & STATUS_INT_ENABLED)) {
10636 /* IRQ is disabled */
10637 goto none;
10638 }
10639
10640 inta = ipw_read32(priv, IPW_INTA_RW);
10641 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10642
10643 if (inta == 0xFFFFFFFF) {
10644 /* Hardware disappeared */
10645 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10646 goto none;
10647 }
10648
10649 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10650 /* Shared interrupt */
10651 goto none;
10652 }
10653
10654 /* tell the device to stop sending interrupts */
10655 __ipw_disable_interrupts(priv);
10656
10657 /* ack current interrupts */
10658 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10659 ipw_write32(priv, IPW_INTA_RW, inta);
10660
10661 /* Cache INTA value for our tasklet */
10662 priv->isr_inta = inta;
10663
10664 tasklet_schedule(&priv->irq_tasklet);
10665
10666 spin_unlock(&priv->irq_lock);
10667
10668 return IRQ_HANDLED;
10669 none:
10670 spin_unlock(&priv->irq_lock);
10671 return IRQ_NONE;
10672 }
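/*
 * Note on the split above: the hard-IRQ handler only masks and acks the
 * interrupt causes and caches them in priv->isr_inta; the deferred
 * priv->irq_tasklet (registered in ipw_setup_deferred_work() below) is
 * expected to service the cached causes and re-enable interrupts.
 */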
10673
10674 static void ipw_rf_kill(void *adapter)
10675 {
10676 struct ipw_priv *priv = adapter;
10677 unsigned long flags;
10678
10679 spin_lock_irqsave(&priv->lock, flags);
10680
10681 if (rf_kill_active(priv)) {
10682 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10683 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10684 goto exit_unlock;
10685 }
10686
10687 /* RF Kill is now disabled, so bring the device back up */
10688
10689 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10690 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10691 "device\n");
10692
10693 /* we cannot do an adapter restart while inside an irq lock */
10694 schedule_work(&priv->adapter_restart);
10695 } else
10696 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10697 "enabled\n");
10698
10699 exit_unlock:
10700 spin_unlock_irqrestore(&priv->lock, flags);
10701 }
10702
10703 static void ipw_bg_rf_kill(struct work_struct *work)
10704 {
10705 struct ipw_priv *priv =
10706 container_of(work, struct ipw_priv, rf_kill.work);
10707 mutex_lock(&priv->mutex);
10708 ipw_rf_kill(priv);
10709 mutex_unlock(&priv->mutex);
10710 }
10711
10712 static void ipw_link_up(struct ipw_priv *priv)
10713 {
10714 priv->last_seq_num = -1;
10715 priv->last_frag_num = -1;
10716 priv->last_packet_time = 0;
10717
10718 netif_carrier_on(priv->net_dev);
10719
10720 cancel_delayed_work(&priv->request_scan);
10721 cancel_delayed_work(&priv->request_direct_scan);
10722 cancel_delayed_work(&priv->request_passive_scan);
10723 cancel_delayed_work(&priv->scan_event);
10724 ipw_reset_stats(priv);
10725 /* Ensure the rate is updated immediately */
10726 priv->last_rate = ipw_get_current_rate(priv);
10727 ipw_gather_stats(priv);
10728 ipw_led_link_up(priv);
10729 notify_wx_assoc_event(priv);
10730
10731 if (priv->config & CFG_BACKGROUND_SCAN)
10732 schedule_delayed_work(&priv->request_scan, HZ);
10733 }
10734
10735 static void ipw_bg_link_up(struct work_struct *work)
10736 {
10737 struct ipw_priv *priv =
10738 container_of(work, struct ipw_priv, link_up);
10739 mutex_lock(&priv->mutex);
10740 ipw_link_up(priv);
10741 mutex_unlock(&priv->mutex);
10742 }
10743
10744 static void ipw_link_down(struct ipw_priv *priv)
10745 {
10746 ipw_led_link_down(priv);
10747 netif_carrier_off(priv->net_dev);
10748 notify_wx_assoc_event(priv);
10749
10750 /* Cancel any queued work ... */
10751 cancel_delayed_work(&priv->request_scan);
10752 cancel_delayed_work(&priv->request_direct_scan);
10753 cancel_delayed_work(&priv->request_passive_scan);
10754 cancel_delayed_work(&priv->adhoc_check);
10755 cancel_delayed_work(&priv->gather_stats);
10756
10757 ipw_reset_stats(priv);
10758
10759 if (!(priv->status & STATUS_EXIT_PENDING)) {
10760 /* Queue up another scan... */
10761 schedule_delayed_work(&priv->request_scan, 0);
10762 } else
10763 cancel_delayed_work(&priv->scan_event);
10764 }
10765
10766 static void ipw_bg_link_down(struct work_struct *work)
10767 {
10768 struct ipw_priv *priv =
10769 container_of(work, struct ipw_priv, link_down);
10770 mutex_lock(&priv->mutex);
10771 ipw_link_down(priv);
10772 mutex_unlock(&priv->mutex);
10773 }
10774
10775 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10776 {
10777 int ret = 0;
10778
10779 init_waitqueue_head(&priv->wait_command_queue);
10780 init_waitqueue_head(&priv->wait_state);
10781
10782 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10783 INIT_WORK(&priv->associate, ipw_bg_associate);
10784 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10785 INIT_WORK(&priv->system_config, ipw_system_config);
10786 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10787 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10788 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10789 INIT_WORK(&priv->up, ipw_bg_up);
10790 INIT_WORK(&priv->down, ipw_bg_down);
10791 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10792 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10793 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10794 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10795 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10796 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10797 INIT_WORK(&priv->roam, ipw_bg_roam);
10798 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10799 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10800 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10801 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10802 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10803 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10804 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10805
10806 #ifdef CONFIG_IPW2200_QOS
10807 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10808 #endif /* CONFIG_IPW2200_QOS */
10809
10810 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10811 ipw_irq_tasklet, (unsigned long)priv);
10812
10813 return ret;
10814 }
10815
10816 static void shim__set_security(struct net_device *dev,
10817 struct libipw_security *sec)
10818 {
10819 struct ipw_priv *priv = libipw_priv(dev);
10820 int i;
10821 for (i = 0; i < 4; i++) {
10822 if (sec->flags & (1 << i)) {
10823 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10824 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10825 if (sec->key_sizes[i] == 0)
10826 priv->ieee->sec.flags &= ~(1 << i);
10827 else {
10828 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10829 sec->key_sizes[i]);
10830 priv->ieee->sec.flags |= (1 << i);
10831 }
10832 priv->status |= STATUS_SECURITY_UPDATED;
10833 } else if (sec->level != SEC_LEVEL_1)
10834 priv->ieee->sec.flags &= ~(1 << i);
10835 }
10836
10837 if (sec->flags & SEC_ACTIVE_KEY) {
10838 if (sec->active_key <= 3) {
10839 priv->ieee->sec.active_key = sec->active_key;
10840 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10841 } else
10842 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10843 priv->status |= STATUS_SECURITY_UPDATED;
10844 } else
10845 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10846
10847 if ((sec->flags & SEC_AUTH_MODE) &&
10848 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10849 priv->ieee->sec.auth_mode = sec->auth_mode;
10850 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10851 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10852 priv->capability |= CAP_SHARED_KEY;
10853 else
10854 priv->capability &= ~CAP_SHARED_KEY;
10855 priv->status |= STATUS_SECURITY_UPDATED;
10856 }
10857
10858 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10859 priv->ieee->sec.flags |= SEC_ENABLED;
10860 priv->ieee->sec.enabled = sec->enabled;
10861 priv->status |= STATUS_SECURITY_UPDATED;
10862 if (sec->enabled)
10863 priv->capability |= CAP_PRIVACY_ON;
10864 else
10865 priv->capability &= ~CAP_PRIVACY_ON;
10866 }
10867
10868 if (sec->flags & SEC_ENCRYPT)
10869 priv->ieee->sec.encrypt = sec->encrypt;
10870
10871 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10872 priv->ieee->sec.level = sec->level;
10873 priv->ieee->sec.flags |= SEC_LEVEL;
10874 priv->status |= STATUS_SECURITY_UPDATED;
10875 }
10876
10877 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10878 ipw_set_hwcrypto_keys(priv);
10879
10880 /* To match the current functionality of ipw2100 (which works well with
10881 * various supplicants), we don't force a disassociate if the
10882 * privacy capability changes ... */
10883 #if 0
10884 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10885 (((priv->assoc_request.capability &
10886 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10887 (!(priv->assoc_request.capability &
10888 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10889 IPW_DEBUG_ASSOC("Disassociating due to capability "
10890 "change.\n");
10891 ipw_disassociate(priv);
10892 }
10893 #endif
10894 }
10895
10896 static int init_supported_rates(struct ipw_priv *priv,
10897 struct ipw_supported_rates *rates)
10898 {
10899 /* TODO: Mask out rates based on priv->rates_mask */
10900
10901 memset(rates, 0, sizeof(*rates));
10902 /* configure supported rates */
10903 switch (priv->ieee->freq_band) {
10904 case LIBIPW_52GHZ_BAND:
10905 rates->ieee_mode = IPW_A_MODE;
10906 rates->purpose = IPW_RATE_CAPABILITIES;
10907 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10908 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10909 break;
10910
10911 default: /* Mixed or 2.4 GHz */
10912 rates->ieee_mode = IPW_G_MODE;
10913 rates->purpose = IPW_RATE_CAPABILITIES;
10914 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10915 LIBIPW_CCK_DEFAULT_RATES_MASK);
10916 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10917 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10918 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10919 }
10920 break;
10921 }
10922
10923 return 0;
10924 }
10925
10926 static int ipw_config(struct ipw_priv *priv)
10927 {
10928 /* This is only called from ipw_up, which resets/reloads the firmware,
10929 so we don't need to first disable the card before we configure
10930 it */
10931 if (ipw_set_tx_power(priv))
10932 goto error;
10933
10934 /* initialize adapter address */
10935 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10936 goto error;
10937
10938 /* set basic system config settings */
10939 init_sys_config(&priv->sys_config);
10940
10941 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10942 * Does not support BT priority yet (don't abort or defer our Tx) */
10943 if (bt_coexist) {
10944 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10945
10946 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10947 priv->sys_config.bt_coexistence
10948 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10949 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10950 priv->sys_config.bt_coexistence
10951 |= CFG_BT_COEXISTENCE_OOB;
10952 }
10953
10954 #ifdef CONFIG_IPW2200_PROMISCUOUS
10955 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10956 priv->sys_config.accept_all_data_frames = 1;
10957 priv->sys_config.accept_non_directed_frames = 1;
10958 priv->sys_config.accept_all_mgmt_bcpr = 1;
10959 priv->sys_config.accept_all_mgmt_frames = 1;
10960 }
10961 #endif
10962
10963 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10964 priv->sys_config.answer_broadcast_ssid_probe = 1;
10965 else
10966 priv->sys_config.answer_broadcast_ssid_probe = 0;
10967
10968 if (ipw_send_system_config(priv))
10969 goto error;
10970
10971 init_supported_rates(priv, &priv->rates);
10972 if (ipw_send_supported_rates(priv, &priv->rates))
10973 goto error;
10974
10975 /* Set request-to-send threshold */
10976 if (priv->rts_threshold) {
10977 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10978 goto error;
10979 }
10980 #ifdef CONFIG_IPW2200_QOS
10981 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10982 ipw_qos_activate(priv, NULL);
10983 #endif /* CONFIG_IPW2200_QOS */
10984
10985 if (ipw_set_random_seed(priv))
10986 goto error;
10987
10988 /* final state transition to the RUN state */
10989 if (ipw_send_host_complete(priv))
10990 goto error;
10991
10992 priv->status |= STATUS_INIT;
10993
10994 ipw_led_init(priv);
10995 ipw_led_radio_on(priv);
10996 priv->notif_missed_beacons = 0;
10997
10998 /* Set hardware WEP key if it is configured. */
10999 if ((priv->capability & CAP_PRIVACY_ON) &&
11000 (priv->ieee->sec.level == SEC_LEVEL_1) &&
11001 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
11002 ipw_set_hwcrypto_keys(priv);
11003
11004 return 0;
11005
11006 error:
11007 return -EIO;
11008 }
11009
11010 /*
11011 * NOTE:
11012 *
11013 * These tables have been tested in conjunction with the
11014 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11015 *
11016 * Altering these values, using them on other hardware, or using them in
11017 * geographies not intended for resale of the above-mentioned Intel
11018 * adapters has not been tested.
11019 *
11020 * Remember to update the table in README.ipw2200 when changing this
11021 * table.
11022 *
11023 */
11024 static const struct libipw_geo ipw_geos[] = {
11025 { /* Restricted */
11026 "---",
11027 .bg_channels = 11,
11028 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11029 {2427, 4}, {2432, 5}, {2437, 6},
11030 {2442, 7}, {2447, 8}, {2452, 9},
11031 {2457, 10}, {2462, 11}},
11032 },
11033
11034 { /* Custom US/Canada */
11035 "ZZF",
11036 .bg_channels = 11,
11037 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11038 {2427, 4}, {2432, 5}, {2437, 6},
11039 {2442, 7}, {2447, 8}, {2452, 9},
11040 {2457, 10}, {2462, 11}},
11041 .a_channels = 8,
11042 .a = {{5180, 36},
11043 {5200, 40},
11044 {5220, 44},
11045 {5240, 48},
11046 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11047 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11048 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11049 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11050 },
11051
11052 { /* Rest of World */
11053 "ZZD",
11054 .bg_channels = 13,
11055 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11056 {2427, 4}, {2432, 5}, {2437, 6},
11057 {2442, 7}, {2447, 8}, {2452, 9},
11058 {2457, 10}, {2462, 11}, {2467, 12},
11059 {2472, 13}},
11060 },
11061
11062 { /* Custom USA & Europe & High */
11063 "ZZA",
11064 .bg_channels = 11,
11065 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11066 {2427, 4}, {2432, 5}, {2437, 6},
11067 {2442, 7}, {2447, 8}, {2452, 9},
11068 {2457, 10}, {2462, 11}},
11069 .a_channels = 13,
11070 .a = {{5180, 36},
11071 {5200, 40},
11072 {5220, 44},
11073 {5240, 48},
11074 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11075 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11076 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11077 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11078 {5745, 149},
11079 {5765, 153},
11080 {5785, 157},
11081 {5805, 161},
11082 {5825, 165}},
11083 },
11084
11085 { /* Custom NA & Europe */
11086 "ZZB",
11087 .bg_channels = 11,
11088 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11089 {2427, 4}, {2432, 5}, {2437, 6},
11090 {2442, 7}, {2447, 8}, {2452, 9},
11091 {2457, 10}, {2462, 11}},
11092 .a_channels = 13,
11093 .a = {{5180, 36},
11094 {5200, 40},
11095 {5220, 44},
11096 {5240, 48},
11097 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11098 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11099 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11100 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11101 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11102 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11103 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11104 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11105 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11106 },
11107
11108 { /* Custom Japan */
11109 "ZZC",
11110 .bg_channels = 11,
11111 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11112 {2427, 4}, {2432, 5}, {2437, 6},
11113 {2442, 7}, {2447, 8}, {2452, 9},
11114 {2457, 10}, {2462, 11}},
11115 .a_channels = 4,
11116 .a = {{5170, 34}, {5190, 38},
11117 {5210, 42}, {5230, 46}},
11118 },
11119
11120 { /* Custom */
11121 "ZZM",
11122 .bg_channels = 11,
11123 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11124 {2427, 4}, {2432, 5}, {2437, 6},
11125 {2442, 7}, {2447, 8}, {2452, 9},
11126 {2457, 10}, {2462, 11}},
11127 },
11128
11129 { /* Europe */
11130 "ZZE",
11131 .bg_channels = 13,
11132 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11133 {2427, 4}, {2432, 5}, {2437, 6},
11134 {2442, 7}, {2447, 8}, {2452, 9},
11135 {2457, 10}, {2462, 11}, {2467, 12},
11136 {2472, 13}},
11137 .a_channels = 19,
11138 .a = {{5180, 36},
11139 {5200, 40},
11140 {5220, 44},
11141 {5240, 48},
11142 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11143 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11144 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11145 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11146 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11147 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11148 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11149 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11150 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11151 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11152 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11153 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11154 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11155 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11156 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11157 },
11158
11159 { /* Custom Japan */
11160 "ZZJ",
11161 .bg_channels = 14,
11162 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11163 {2427, 4}, {2432, 5}, {2437, 6},
11164 {2442, 7}, {2447, 8}, {2452, 9},
11165 {2457, 10}, {2462, 11}, {2467, 12},
11166 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11167 .a_channels = 4,
11168 .a = {{5170, 34}, {5190, 38},
11169 {5210, 42}, {5230, 46}},
11170 },
11171
11172 { /* Rest of World */
11173 "ZZR",
11174 .bg_channels = 14,
11175 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11176 {2427, 4}, {2432, 5}, {2437, 6},
11177 {2442, 7}, {2447, 8}, {2452, 9},
11178 {2457, 10}, {2462, 11}, {2467, 12},
11179 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11180 LIBIPW_CH_PASSIVE_ONLY}},
11181 },
11182
11183 { /* High Band */
11184 "ZZH",
11185 .bg_channels = 13,
11186 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11187 {2427, 4}, {2432, 5}, {2437, 6},
11188 {2442, 7}, {2447, 8}, {2452, 9},
11189 {2457, 10}, {2462, 11},
11190 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11191 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11192 .a_channels = 4,
11193 .a = {{5745, 149}, {5765, 153},
11194 {5785, 157}, {5805, 161}},
11195 },
11196
11197 { /* Custom Europe */
11198 "ZZG",
11199 .bg_channels = 13,
11200 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11201 {2427, 4}, {2432, 5}, {2437, 6},
11202 {2442, 7}, {2447, 8}, {2452, 9},
11203 {2457, 10}, {2462, 11},
11204 {2467, 12}, {2472, 13}},
11205 .a_channels = 4,
11206 .a = {{5180, 36}, {5200, 40},
11207 {5220, 44}, {5240, 48}},
11208 },
11209
11210 { /* Europe */
11211 "ZZK",
11212 .bg_channels = 13,
11213 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11214 {2427, 4}, {2432, 5}, {2437, 6},
11215 {2442, 7}, {2447, 8}, {2452, 9},
11216 {2457, 10}, {2462, 11},
11217 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11218 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11219 .a_channels = 24,
11220 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11221 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11222 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11223 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11224 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11225 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11226 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11227 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11228 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11229 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11230 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11231 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11232 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11233 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11234 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11235 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11236 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11237 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11238 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11239 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11240 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11241 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11242 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11243 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11244 },
11245
11246 { /* Europe */
11247 "ZZL",
11248 .bg_channels = 11,
11249 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11250 {2427, 4}, {2432, 5}, {2437, 6},
11251 {2442, 7}, {2447, 8}, {2452, 9},
11252 {2457, 10}, {2462, 11}},
11253 .a_channels = 13,
11254 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11255 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11256 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11257 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11258 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11259 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11260 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11261 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11262 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11263 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11264 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11265 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11266 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11267 }
11268 };
11269
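/*
 * Bring the hardware up: load firmware/microcode, program the MAC
 * address, match the EEPROM country code against the ipw_geos table
 * above (falling back to the first entry if the SKU is unknown), and
 * then configure the device, retrying up to MAX_HW_RESTARTS times.
 * Returns 0 on success or when the radio is held off by rf-kill.
 */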
11270 #define MAX_HW_RESTARTS 5
11271 static int ipw_up(struct ipw_priv *priv)
11272 {
11273 int rc, i, j;
11274
11275 /* Age scan list entries found before suspend */
11276 if (priv->suspend_time) {
11277 libipw_networks_age(priv->ieee, priv->suspend_time);
11278 priv->suspend_time = 0;
11279 }
11280
11281 if (priv->status & STATUS_EXIT_PENDING)
11282 return -EIO;
11283
11284 if (cmdlog && !priv->cmdlog) {
11285 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11286 GFP_KERNEL);
11287 if (priv->cmdlog == NULL) {
11288 IPW_ERROR("Error allocating %d command log entries.\n",
11289 cmdlog);
11290 return -ENOMEM;
11291 } else {
11292 priv->cmdlog_len = cmdlog;
11293 }
11294 }
11295
11296 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11297 /* Load the microcode, firmware, and eeprom.
11298 * Also start the clocks. */
11299 rc = ipw_load(priv);
11300 if (rc) {
11301 IPW_ERROR("Unable to load firmware: %d\n", rc);
11302 return rc;
11303 }
11304
11305 ipw_init_ordinals(priv);
11306 if (!(priv->config & CFG_CUSTOM_MAC))
11307 eeprom_parse_mac(priv, priv->mac_addr);
11308 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11309 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11310
11311 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11312 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11313 ipw_geos[j].name, 3))
11314 break;
11315 }
11316 if (j == ARRAY_SIZE(ipw_geos)) {
11317 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11318 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11319 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11320 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11321 j = 0;
11322 }
11323 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11324 IPW_WARNING("Could not set geography.");
11325 return 0;
11326 }
11327
11328 if (priv->status & STATUS_RF_KILL_SW) {
11329 IPW_WARNING("Radio disabled by module parameter.\n");
11330 return 0;
11331 } else if (rf_kill_active(priv)) {
11332 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11333 "Kill switch must be turned off for "
11334 "wireless networking to work.\n");
11335 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11336 return 0;
11337 }
11338
11339 rc = ipw_config(priv);
11340 if (!rc) {
11341 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11342
11343 /* If configured to try and auto-associate, kick
11344 * off a scan. */
11345 schedule_delayed_work(&priv->request_scan, 0);
11346
11347 return 0;
11348 }
11349
11350 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11351 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11352 i, MAX_HW_RESTARTS);
11353
11354 /* We had an error bringing up the hardware, so take it
11355 * all the way back down so we can try again */
11356 ipw_down(priv);
11357 }
11358
11359 /* Tried to restart and configure the device for as long as our
11360 * patience could withstand */
11361 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11362
11363 return -EIO;
11364 }
11365
11366 static void ipw_bg_up(struct work_struct *work)
11367 {
11368 struct ipw_priv *priv =
11369 container_of(work, struct ipw_priv, up);
11370 mutex_lock(&priv->mutex);
11371 ipw_up(priv);
11372 mutex_unlock(&priv->mutex);
11373 }
11374
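/*
 * Orderly shutdown: abort any scan in progress, disassociate if needed,
 * give the firmware a short window to leave the SCANNING/ASSOCIATED
 * states, then disable the card and clear STATUS_INIT.
 */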
11375 static void ipw_deinit(struct ipw_priv *priv)
11376 {
11377 int i;
11378
11379 if (priv->status & STATUS_SCANNING) {
11380 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11381 ipw_abort_scan(priv);
11382 }
11383
11384 if (priv->status & STATUS_ASSOCIATED) {
11385 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11386 ipw_disassociate(priv);
11387 }
11388
11389 ipw_led_shutdown(priv);
11390
11391 /* Wait up to 1s for status to change to not scanning and not
11392 * associated (disassociation can take a while for a full 802.11
11393 * exchange) */
11394 for (i = 1000; i && (priv->status &
11395 (STATUS_DISASSOCIATING |
11396 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11397 udelay(10);
11398
11399 if (priv->status & (STATUS_DISASSOCIATING |
11400 STATUS_ASSOCIATED | STATUS_SCANNING))
11401 IPW_DEBUG_INFO("Still associated or scanning...\n");
11402 else
11403 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11404
11405 /* Attempt to disable the card */
11406 ipw_send_card_disable(priv, 0);
11407
11408 priv->status &= ~STATUS_INIT;
11409 }
11410
11411 static void ipw_down(struct ipw_priv *priv)
11412 {
11413 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11414
11415 priv->status |= STATUS_EXIT_PENDING;
11416
11417 if (ipw_is_init(priv))
11418 ipw_deinit(priv);
11419
11420 /* Wipe out the EXIT_PENDING status bit if we are not actually
11421 * exiting the module */
11422 if (!exit_pending)
11423 priv->status &= ~STATUS_EXIT_PENDING;
11424
11425 /* tell the device to stop sending interrupts */
11426 ipw_disable_interrupts(priv);
11427
11428 /* Clear all bits but the RF Kill */
11429 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11430 netif_carrier_off(priv->net_dev);
11431
11432 ipw_stop_nic(priv);
11433
11434 ipw_led_radio_off(priv);
11435 }
11436
11437 static void ipw_bg_down(struct work_struct *work)
11438 {
11439 struct ipw_priv *priv =
11440 container_of(work, struct ipw_priv, down);
11441 mutex_lock(&priv->mutex);
11442 ipw_down(priv);
11443 mutex_unlock(&priv->mutex);
11444 }
11445
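/*
 * Translate the active libipw geography (bg/a channel tables) into
 * cfg80211 ieee80211_supported_band structures and register the wiphy.
 * The channel arrays allocated here are freed in ipw_pci_remove() (and
 * on the probe error path) after wiphy_unregister().
 */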
11446 static int ipw_wdev_init(struct net_device *dev)
11447 {
11448 int i, rc = 0;
11449 struct ipw_priv *priv = libipw_priv(dev);
11450 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11451 struct wireless_dev *wdev = &priv->ieee->wdev;
11452
11453 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11454
11455 /* fill-out priv->ieee->bg_band */
11456 if (geo->bg_channels) {
11457 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11458
11459 bg_band->band = IEEE80211_BAND_2GHZ;
11460 bg_band->n_channels = geo->bg_channels;
11461 bg_band->channels = kcalloc(geo->bg_channels,
11462 sizeof(struct ieee80211_channel),
11463 GFP_KERNEL);
11464 if (!bg_band->channels) {
11465 rc = -ENOMEM;
11466 goto out;
11467 }
11468 /* translate geo->bg to bg_band.channels */
11469 for (i = 0; i < geo->bg_channels; i++) {
11470 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11471 bg_band->channels[i].center_freq = geo->bg[i].freq;
11472 bg_band->channels[i].hw_value = geo->bg[i].channel;
11473 bg_band->channels[i].max_power = geo->bg[i].max_power;
11474 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11475 bg_band->channels[i].flags |=
11476 IEEE80211_CHAN_PASSIVE_SCAN;
11477 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11478 bg_band->channels[i].flags |=
11479 IEEE80211_CHAN_NO_IBSS;
11480 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11481 bg_band->channels[i].flags |=
11482 IEEE80211_CHAN_RADAR;
11483 /* No equivalent for LIBIPW_CH_80211H_RULES,
11484 LIBIPW_CH_UNIFORM_SPREADING, or
11485 LIBIPW_CH_B_ONLY... */
11486 }
11487 /* point at bitrate info */
11488 bg_band->bitrates = ipw2200_bg_rates;
11489 bg_band->n_bitrates = ipw2200_num_bg_rates;
11490
11491 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11492 }
11493
11494 /* fill-out priv->ieee->a_band */
11495 if (geo->a_channels) {
11496 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11497
11498 a_band->band = IEEE80211_BAND_5GHZ;
11499 a_band->n_channels = geo->a_channels;
11500 a_band->channels = kcalloc(geo->a_channels,
11501 sizeof(struct ieee80211_channel),
11502 GFP_KERNEL);
11503 if (!a_band->channels) {
11504 rc = -ENOMEM;
11505 goto out;
11506 }
11507 /* translate geo->a to a_band.channels */
11508 for (i = 0; i < geo->a_channels; i++) {
11509 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11510 a_band->channels[i].center_freq = geo->a[i].freq;
11511 a_band->channels[i].hw_value = geo->a[i].channel;
11512 a_band->channels[i].max_power = geo->a[i].max_power;
11513 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11514 a_band->channels[i].flags |=
11515 IEEE80211_CHAN_PASSIVE_SCAN;
11516 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11517 a_band->channels[i].flags |=
11518 IEEE80211_CHAN_NO_IBSS;
11519 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11520 a_band->channels[i].flags |=
11521 IEEE80211_CHAN_RADAR;
11522 /* No equivalent for LIBIPW_CH_80211H_RULES,
11523 LIBIPW_CH_UNIFORM_SPREADING, or
11524 LIBIPW_CH_B_ONLY... */
11525 }
11526 /* point at bitrate info */
11527 a_band->bitrates = ipw2200_a_rates;
11528 a_band->n_bitrates = ipw2200_num_a_rates;
11529
11530 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11531 }
11532
11533 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11534 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11535
11536 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11537
11538 /* With that information in place, we can now register the wiphy... */
11539 if (wiphy_register(wdev->wiphy))
11540 rc = -EIO;
11541 out:
11542 return rc;
11543 }
11544
11545 /* PCI driver stuff */
11546 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11547 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11548 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11549 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11550 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11551 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11552 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11553 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11554 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11555 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11556 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11557 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11558 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11559 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11560 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11561 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11562 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11563 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11564 {PCI_VDEVICE(INTEL, 0x104f), 0},
11565 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11566 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11567 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11568 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11569
11570 /* required last entry */
11571 {0,}
11572 };
11573
11574 MODULE_DEVICE_TABLE(pci, card_ids);
11575
11576 static struct attribute *ipw_sysfs_entries[] = {
11577 &dev_attr_rf_kill.attr,
11578 &dev_attr_direct_dword.attr,
11579 &dev_attr_indirect_byte.attr,
11580 &dev_attr_indirect_dword.attr,
11581 &dev_attr_mem_gpio_reg.attr,
11582 &dev_attr_command_event_reg.attr,
11583 &dev_attr_nic_type.attr,
11584 &dev_attr_status.attr,
11585 &dev_attr_cfg.attr,
11586 &dev_attr_error.attr,
11587 &dev_attr_event_log.attr,
11588 &dev_attr_cmd_log.attr,
11589 &dev_attr_eeprom_delay.attr,
11590 &dev_attr_ucode_version.attr,
11591 &dev_attr_rtc.attr,
11592 &dev_attr_scan_age.attr,
11593 &dev_attr_led.attr,
11594 &dev_attr_speed_scan.attr,
11595 &dev_attr_net_stats.attr,
11596 &dev_attr_channels.attr,
11597 #ifdef CONFIG_IPW2200_PROMISCUOUS
11598 &dev_attr_rtap_iface.attr,
11599 &dev_attr_rtap_filter.attr,
11600 #endif
11601 NULL
11602 };
11603
11604 static struct attribute_group ipw_attribute_group = {
11605 .name = NULL, /* put in device directory */
11606 .attrs = ipw_sysfs_entries,
11607 };
11608
11609 #ifdef CONFIG_IPW2200_PROMISCUOUS
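/*
 * Promiscuous (rtap) support: when the rtap_iface module parameter is
 * set, ipw_prom_alloc() registers a second "rtap%d" net_device of type
 * ARPHRD_IEEE80211_RADIOTAP.  Opening it (while the main interface is
 * not already in monitor mode) asks the firmware to accept all data and
 * management frames via ipw_send_system_config(); its transmit handler
 * simply drops any packet handed to it.
 */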
11610 static int ipw_prom_open(struct net_device *dev)
11611 {
11612 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11613 struct ipw_priv *priv = prom_priv->priv;
11614
11615 IPW_DEBUG_INFO("prom dev->open\n");
11616 netif_carrier_off(dev);
11617
11618 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11619 priv->sys_config.accept_all_data_frames = 1;
11620 priv->sys_config.accept_non_directed_frames = 1;
11621 priv->sys_config.accept_all_mgmt_bcpr = 1;
11622 priv->sys_config.accept_all_mgmt_frames = 1;
11623
11624 ipw_send_system_config(priv);
11625 }
11626
11627 return 0;
11628 }
11629
11630 static int ipw_prom_stop(struct net_device *dev)
11631 {
11632 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11633 struct ipw_priv *priv = prom_priv->priv;
11634
11635 IPW_DEBUG_INFO("prom dev->stop\n");
11636
11637 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11638 priv->sys_config.accept_all_data_frames = 0;
11639 priv->sys_config.accept_non_directed_frames = 0;
11640 priv->sys_config.accept_all_mgmt_bcpr = 0;
11641 priv->sys_config.accept_all_mgmt_frames = 0;
11642
11643 ipw_send_system_config(priv);
11644 }
11645
11646 return 0;
11647 }
11648
11649 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11650 struct net_device *dev)
11651 {
11652 IPW_DEBUG_INFO("prom dev->xmit\n");
11653 dev_kfree_skb(skb);
11654 return NETDEV_TX_OK;
11655 }
11656
11657 static const struct net_device_ops ipw_prom_netdev_ops = {
11658 .ndo_open = ipw_prom_open,
11659 .ndo_stop = ipw_prom_stop,
11660 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11661 .ndo_change_mtu = libipw_change_mtu,
11662 .ndo_set_mac_address = eth_mac_addr,
11663 .ndo_validate_addr = eth_validate_addr,
11664 };
11665
11666 static int ipw_prom_alloc(struct ipw_priv *priv)
11667 {
11668 int rc = 0;
11669
11670 if (priv->prom_net_dev)
11671 return -EPERM;
11672
11673 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11674 if (priv->prom_net_dev == NULL)
11675 return -ENOMEM;
11676
11677 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11678 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11679 priv->prom_priv->priv = priv;
11680
11681 strcpy(priv->prom_net_dev->name, "rtap%d");
11682 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11683
11684 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11685 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11686
11687 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11688 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11689
11690 rc = register_netdev(priv->prom_net_dev);
11691 if (rc) {
11692 free_libipw(priv->prom_net_dev, 1);
11693 priv->prom_net_dev = NULL;
11694 return rc;
11695 }
11696
11697 return 0;
11698 }
11699
11700 static void ipw_prom_free(struct ipw_priv *priv)
11701 {
11702 if (!priv->prom_net_dev)
11703 return;
11704
11705 unregister_netdev(priv->prom_net_dev);
11706 free_libipw(priv->prom_net_dev, 1);
11707
11708 priv->prom_net_dev = NULL;
11709 }
11710
11711 #endif
11712
11713 static const struct net_device_ops ipw_netdev_ops = {
11714 .ndo_open = ipw_net_open,
11715 .ndo_stop = ipw_net_stop,
11716 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11717 .ndo_set_mac_address = ipw_net_set_mac_address,
11718 .ndo_start_xmit = libipw_xmit,
11719 .ndo_change_mtu = libipw_change_mtu,
11720 .ndo_validate_addr = eth_validate_addr,
11721 };
11722
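/*
 * PCI probe: allocate the libipw net_device, enable the PCI device with
 * a 32-bit DMA mask, map BAR 0, hook the shared interrupt, create the
 * sysfs attribute group, bring the hardware up via ipw_up(), and then
 * register the wiphy and the network device (plus the optional rtap
 * interface).  The error labels unwind these steps in reverse order.
 */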
11723 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11724 const struct pci_device_id *ent)
11725 {
11726 int err = 0;
11727 struct net_device *net_dev;
11728 void __iomem *base;
11729 u32 length, val;
11730 struct ipw_priv *priv;
11731 int i;
11732
11733 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11734 if (net_dev == NULL) {
11735 err = -ENOMEM;
11736 goto out;
11737 }
11738
11739 priv = libipw_priv(net_dev);
11740 priv->ieee = netdev_priv(net_dev);
11741
11742 priv->net_dev = net_dev;
11743 priv->pci_dev = pdev;
11744 ipw_debug_level = debug;
11745 spin_lock_init(&priv->irq_lock);
11746 spin_lock_init(&priv->lock);
11747 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11748 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11749
11750 mutex_init(&priv->mutex);
11751 if (pci_enable_device(pdev)) {
11752 err = -ENODEV;
11753 goto out_free_libipw;
11754 }
11755
11756 pci_set_master(pdev);
11757
11758 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11759 if (!err)
11760 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11761 if (err) {
11762 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11763 goto out_pci_disable_device;
11764 }
11765
11766 pci_set_drvdata(pdev, priv);
11767
11768 err = pci_request_regions(pdev, DRV_NAME);
11769 if (err)
11770 goto out_pci_disable_device;
11771
11772 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11773 * PCI Tx retries from interfering with C3 CPU state */
11774 pci_read_config_dword(pdev, 0x40, &val);
11775 if ((val & 0x0000ff00) != 0)
11776 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11777
11778 length = pci_resource_len(pdev, 0);
11779 priv->hw_len = length;
11780
11781 base = pci_ioremap_bar(pdev, 0);
11782 if (!base) {
11783 err = -ENODEV;
11784 goto out_pci_release_regions;
11785 }
11786
11787 priv->hw_base = base;
11788 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11789 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11790
11791 err = ipw_setup_deferred_work(priv);
11792 if (err) {
11793 IPW_ERROR("Unable to setup deferred work\n");
11794 goto out_iounmap;
11795 }
11796
11797 ipw_sw_reset(priv, 1);
11798
11799 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11800 if (err) {
11801 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11802 goto out_iounmap;
11803 }
11804
11805 SET_NETDEV_DEV(net_dev, &pdev->dev);
11806
11807 mutex_lock(&priv->mutex);
11808
11809 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11810 priv->ieee->set_security = shim__set_security;
11811 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11812
11813 #ifdef CONFIG_IPW2200_QOS
11814 priv->ieee->is_qos_active = ipw_is_qos_active;
11815 priv->ieee->handle_probe_response = ipw_handle_beacon;
11816 priv->ieee->handle_beacon = ipw_handle_probe_response;
11817 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11818 #endif /* CONFIG_IPW2200_QOS */
11819
11820 priv->ieee->perfect_rssi = -20;
11821 priv->ieee->worst_rssi = -85;
11822
11823 net_dev->netdev_ops = &ipw_netdev_ops;
11824 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11825 net_dev->wireless_data = &priv->wireless_data;
11826 net_dev->wireless_handlers = &ipw_wx_handler_def;
11827 net_dev->ethtool_ops = &ipw_ethtool_ops;
11828
11829 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11830 if (err) {
11831 IPW_ERROR("failed to create sysfs device attributes\n");
11832 mutex_unlock(&priv->mutex);
11833 goto out_release_irq;
11834 }
11835
11836 if (ipw_up(priv)) {
11837 mutex_unlock(&priv->mutex);
11838 err = -EIO;
11839 goto out_remove_sysfs;
11840 }
11841
11842 mutex_unlock(&priv->mutex);
11843
11844 err = ipw_wdev_init(net_dev);
11845 if (err) {
11846 IPW_ERROR("failed to register wireless device\n");
11847 goto out_remove_sysfs;
11848 }
11849
11850 err = register_netdev(net_dev);
11851 if (err) {
11852 IPW_ERROR("failed to register network device\n");
11853 goto out_unregister_wiphy;
11854 }
11855
11856 #ifdef CONFIG_IPW2200_PROMISCUOUS
11857 if (rtap_iface) {
11858 err = ipw_prom_alloc(priv);
11859 if (err) {
11860 IPW_ERROR("Failed to register promiscuous network "
11861 "device (error %d).\n", err);
11862 unregister_netdev(priv->net_dev);
11863 goto out_unregister_wiphy;
11864 }
11865 }
11866 #endif
11867
11868 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11869 "channels, %d 802.11a channels)\n",
11870 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11871 priv->ieee->geo.a_channels);
11872
11873 return 0;
11874
11875 out_unregister_wiphy:
11876 wiphy_unregister(priv->ieee->wdev.wiphy);
11877 kfree(priv->ieee->a_band.channels);
11878 kfree(priv->ieee->bg_band.channels);
11879 out_remove_sysfs:
11880 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11881 out_release_irq:
11882 free_irq(pdev->irq, priv);
11883 out_iounmap:
11884 iounmap(priv->hw_base);
11885 out_pci_release_regions:
11886 pci_release_regions(pdev);
11887 out_pci_disable_device:
11888 pci_disable_device(pdev);
11889 pci_set_drvdata(pdev, NULL);
11890 out_free_libipw:
11891 free_libipw(priv->net_dev, 0);
11892 out:
11893 return err;
11894 }
11895
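/*
 * PCI remove: take the device down, detach it from sysfs and the
 * network stack, cancel every deferred work item, free the rx/tx
 * queues, the command log and the IBSS MAC hash, then release the IRQ,
 * the mapped registers and the PCI resources.  wiphy_unregister() must
 * run before free_libipw().
 */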
11896 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11897 {
11898 struct ipw_priv *priv = pci_get_drvdata(pdev);
11899 struct list_head *p, *q;
11900 int i;
11901
11902 if (!priv)
11903 return;
11904
11905 mutex_lock(&priv->mutex);
11906
11907 priv->status |= STATUS_EXIT_PENDING;
11908 ipw_down(priv);
11909 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11910
11911 mutex_unlock(&priv->mutex);
11912
11913 unregister_netdev(priv->net_dev);
11914
11915 if (priv->rxq) {
11916 ipw_rx_queue_free(priv, priv->rxq);
11917 priv->rxq = NULL;
11918 }
11919 ipw_tx_queue_free(priv);
11920
11921 if (priv->cmdlog) {
11922 kfree(priv->cmdlog);
11923 priv->cmdlog = NULL;
11924 }
11925
11926 /* make sure all work items are inactive */
11927 cancel_delayed_work_sync(&priv->adhoc_check);
11928 cancel_work_sync(&priv->associate);
11929 cancel_work_sync(&priv->disassociate);
11930 cancel_work_sync(&priv->system_config);
11931 cancel_work_sync(&priv->rx_replenish);
11932 cancel_work_sync(&priv->adapter_restart);
11933 cancel_delayed_work_sync(&priv->rf_kill);
11934 cancel_work_sync(&priv->up);
11935 cancel_work_sync(&priv->down);
11936 cancel_delayed_work_sync(&priv->request_scan);
11937 cancel_delayed_work_sync(&priv->request_direct_scan);
11938 cancel_delayed_work_sync(&priv->request_passive_scan);
11939 cancel_delayed_work_sync(&priv->scan_event);
11940 cancel_delayed_work_sync(&priv->gather_stats);
11941 cancel_work_sync(&priv->abort_scan);
11942 cancel_work_sync(&priv->roam);
11943 cancel_delayed_work_sync(&priv->scan_check);
11944 cancel_work_sync(&priv->link_up);
11945 cancel_work_sync(&priv->link_down);
11946 cancel_delayed_work_sync(&priv->led_link_on);
11947 cancel_delayed_work_sync(&priv->led_link_off);
11948 cancel_delayed_work_sync(&priv->led_act_off);
11949 cancel_work_sync(&priv->merge_networks);
11950
11951 /* Free MAC hash list for ADHOC */
11952 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11953 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11954 list_del(p);
11955 kfree(list_entry(p, struct ipw_ibss_seq, list));
11956 }
11957 }
11958
11959 kfree(priv->error);
11960 priv->error = NULL;
11961
11962 #ifdef CONFIG_IPW2200_PROMISCUOUS
11963 ipw_prom_free(priv);
11964 #endif
11965
11966 free_irq(pdev->irq, priv);
11967 iounmap(priv->hw_base);
11968 pci_release_regions(pdev);
11969 pci_disable_device(pdev);
11970 pci_set_drvdata(pdev, NULL);
11971 /* wiphy_unregister needs to be here, before free_libipw */
11972 wiphy_unregister(priv->ieee->wdev.wiphy);
11973 kfree(priv->ieee->a_band.channels);
11974 kfree(priv->ieee->bg_band.channels);
11975 free_libipw(priv->net_dev, 0);
11976 free_firmware();
11977 }
11978
11979 #ifdef CONFIG_PM
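/*
 * PCI power-management hooks.  Suspend records the wall-clock time in
 * priv->suspend_at; resume computes priv->suspend_time from it so that
 * ipw_up() can age out scan results gathered before suspend,
 * re-applies the RETRY_TIMEOUT workaround (pci_restore_state() only
 * covers the standard config header), and schedules the up work.
 */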
11980 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11981 {
11982 struct ipw_priv *priv = pci_get_drvdata(pdev);
11983 struct net_device *dev = priv->net_dev;
11984
11985 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11986
11987 /* Take down the device; powers it off, etc. */
11988 ipw_down(priv);
11989
11990 /* Remove the PRESENT state of the device */
11991 netif_device_detach(dev);
11992
11993 pci_save_state(pdev);
11994 pci_disable_device(pdev);
11995 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11996
11997 priv->suspend_at = get_seconds();
11998
11999 return 0;
12000 }
12001
12002 static int ipw_pci_resume(struct pci_dev *pdev)
12003 {
12004 struct ipw_priv *priv = pci_get_drvdata(pdev);
12005 struct net_device *dev = priv->net_dev;
12006 int err;
12007 u32 val;
12008
12009 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
12010
12011 pci_set_power_state(pdev, PCI_D0);
12012 err = pci_enable_device(pdev);
12013 if (err) {
12014 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
12015 dev->name);
12016 return err;
12017 }
12018 pci_restore_state(pdev);
12019
12020 /*
12021 * Suspend/Resume resets the PCI configuration space, so we have to
12022 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12023 * from interfering with C3 CPU state. pci_restore_state won't help
12024 * here since it only restores the first 64 bytes of the PCI config header.
12025 */
12026 pci_read_config_dword(pdev, 0x40, &val);
12027 if ((val & 0x0000ff00) != 0)
12028 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12029
12030 /* Set the device back into the PRESENT state; this will also wake
12031 * the queue if needed */
12032 netif_device_attach(dev);
12033
12034 priv->suspend_time = get_seconds() - priv->suspend_at;
12035
12036 /* Bring the device back up */
12037 schedule_work(&priv->up);
12038
12039 return 0;
12040 }
12041 #endif
12042
12043 static void ipw_pci_shutdown(struct pci_dev *pdev)
12044 {
12045 struct ipw_priv *priv = pci_get_drvdata(pdev);
12046
12047 /* Take down the device; powers it off, etc. */
12048 ipw_down(priv);
12049
12050 pci_disable_device(pdev);
12051 }
12052
12053 /* driver initialization stuff */
12054 static struct pci_driver ipw_driver = {
12055 .name = DRV_NAME,
12056 .id_table = card_ids,
12057 .probe = ipw_pci_probe,
12058 .remove = __devexit_p(ipw_pci_remove),
12059 #ifdef CONFIG_PM
12060 .suspend = ipw_pci_suspend,
12061 .resume = ipw_pci_resume,
12062 #endif
12063 .shutdown = ipw_pci_shutdown,
12064 };
12065
12066 static int __init ipw_init(void)
12067 {
12068 int ret;
12069
12070 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12071 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12072
12073 ret = pci_register_driver(&ipw_driver);
12074 if (ret) {
12075 IPW_ERROR("Unable to initialize PCI module\n");
12076 return ret;
12077 }
12078
12079 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12080 if (ret) {
12081 IPW_ERROR("Unable to create driver sysfs file\n");
12082 pci_unregister_driver(&ipw_driver);
12083 return ret;
12084 }
12085
12086 return ret;
12087 }
12088
12089 static void __exit ipw_exit(void)
12090 {
12091 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12092 pci_unregister_driver(&ipw_driver);
12093 }
12094
12095 module_param(disable, int, 0444);
12096 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12097
12098 module_param(associate, int, 0444);
12099 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12100
12101 module_param(auto_create, int, 0444);
12102 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12103
12104 module_param_named(led, led_support, int, 0444);
12105 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12106
12107 module_param(debug, int, 0444);
12108 MODULE_PARM_DESC(debug, "debug output mask");
12109
12110 module_param_named(channel, default_channel, int, 0444);
12111 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12112
12113 #ifdef CONFIG_IPW2200_PROMISCUOUS
12114 module_param(rtap_iface, int, 0444);
12115 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12116 #endif
12117
12118 #ifdef CONFIG_IPW2200_QOS
12119 module_param(qos_enable, int, 0444);
12120 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12121
12122 module_param(qos_burst_enable, int, 0444);
12123 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12124
12125 module_param(qos_no_ack_mask, int, 0444);
12126 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12127
12128 module_param(burst_duration_CCK, int, 0444);
12129 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12130
12131 module_param(burst_duration_OFDM, int, 0444);
12132 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12133 #endif /* CONFIG_IPW2200_QOS */
12134
12135 #ifdef CONFIG_IPW2200_MONITOR
12136 module_param_named(mode, network_mode, int, 0444);
12137 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12138 #else
12139 module_param_named(mode, network_mode, int, 0444);
12140 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12141 #endif
12142
12143 module_param(bt_coexist, int, 0444);
12144 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12145
12146 module_param(hwcrypto, int, 0444);
12147 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12148
12149 module_param(cmdlog, int, 0444);
12150 MODULE_PARM_DESC(cmdlog,
12151 "allocate a ring buffer for logging firmware commands");
12152
12153 module_param(roaming, int, 0444);
12154 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12155
12156 module_param(antenna, int, 0444);
12157 MODULE_PARM_DESC(antenna, "select antenna: 0=both [default], 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
12158
12159 module_exit(ipw_exit);
12160 module_init(ipw_init);