1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38
39
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION IPW2200_VERSION
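/* Illustrative note (not part of the original source): the single-letter
 * suffixes above are appended per enabled config option, so a built-in
 * (non-KBUILD_EXTMOD) build with CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_QOS
 * set reports a version string of "1.2.2kdq", and a build with every option
 * enabled reports "1.2.2kdmprq". */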
80
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
113 #endif
114
115 static struct ieee80211_rate ipw2200_rates[] = {
116 { .bitrate = 10 },
117 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 { .bitrate = 60 },
121 { .bitrate = 90 },
122 { .bitrate = 120 },
123 { .bitrate = 180 },
124 { .bitrate = 240 },
125 { .bitrate = 360 },
126 { .bitrate = 480 },
127 { .bitrate = 540 }
128 };
129
130 #define ipw2200_a_rates (ipw2200_rates + 4)
131 #define ipw2200_num_a_rates 8
132 #define ipw2200_bg_rates (ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates 12
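/* Note (added for clarity, not in the original source): struct
 * ieee80211_rate.bitrate is expressed in units of 100 kb/s, so the table
 * above spans 1-54 Mb/s. The 802.11a view skips the first four (CCK)
 * entries, leaving the eight OFDM rates starting at 6 Mb/s. */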
134
135 /* Ugly macro to convert literal channel numbers into their MHz equivalents.
136  * There are certainly some conditions that will break this (like feeding it '30'),
137  * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 (((x) <= 14) ? \
140 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 ((x) + 1000) * 5)
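/* Worked examples (illustrative, derived from the macro above):
 *   ieee80211chan2mhz(1)  == 1 * 5 + 2407     == 2412 MHz
 *   ieee80211chan2mhz(14) == 2484 MHz (special-cased)
 *   ieee80211chan2mhz(36) == (36 + 1000) * 5  == 5180 MHz
 */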
142
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 QOS_TX3_CW_MIN_OFDM},
153 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 QOS_TX3_CW_MAX_OFDM},
155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 QOS_TX3_CW_MIN_CCK},
164 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 QOS_TX3_CW_MAX_CCK},
166 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 DEF_TX3_CW_MIN_OFDM},
175 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 DEF_TX3_CW_MAX_OFDM},
177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 DEF_TX3_CW_MIN_CCK},
186 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 DEF_TX3_CW_MAX_CCK},
188 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195
196 static int from_priority_to_tx_queue[] = {
197 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
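/* Illustrative mapping (reading the array above by 802.1d priority index):
 * priorities 0 and 3 (best/excellent effort) land on IPW_TX_QUEUE_1,
 * priorities 1-2 (background) on IPW_TX_QUEUE_2, 4-5 (video) on
 * IPW_TX_QUEUE_3, and 6-7 (voice) on IPW_TX_QUEUE_4. */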
200
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 *qos_param);
207 #endif /* CONFIG_IPW2200_QOS */
208
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 int len, int sync);
218
219 static void ipw_tx_queue_free(struct ipw_priv *);
220
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233
234 static int snprint_line(char *buf, size_t count,
235 const u8 * data, u32 len, u32 ofs)
236 {
237 int out, i, j, l;
238 char c;
239
240 out = snprintf(buf, count, "%08X", ofs);
241
242 for (l = 0, i = 0; i < 2; i++) {
243 out += snprintf(buf + out, count - out, " ");
244 for (j = 0; j < 8 && l < len; j++, l++)
245 out += snprintf(buf + out, count - out, "%02X ",
246 data[(i * 8 + j)]);
247 for (; j < 8; j++)
248 out += snprintf(buf + out, count - out, " ");
249 }
250
251 out += snprintf(buf + out, count - out, " ");
252 for (l = 0, i = 0; i < 2; i++) {
253 out += snprintf(buf + out, count - out, " ");
254 for (j = 0; j < 8 && l < len; j++, l++) {
255 c = data[(i * 8 + j)];
256 if (!isascii(c) || !isprint(c))
257 c = '.';
258
259 out += snprintf(buf + out, count - out, "%c", c);
260 }
261
262 for (; j < 8; j++)
263 out += snprintf(buf + out, count - out, " ");
264 }
265
266 return out;
267 }
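/* Example output line (illustrative): for 16 bytes starting at offset 0,
 * snprint_line() produces something like
 *   "00000000  41 42 43 44 45 46 47 48  49 4A 00 01 02 03 04 05   ABCDEFGH IJ......"
 * i.e. an 8-hex-digit offset, two groups of eight hex bytes, then the same
 * bytes as printable ASCII with non-printable characters shown as '.'. */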
268
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 char line[81];
272 u32 ofs = 0;
273 if (!(ipw_debug_level & level))
274 return;
275
276 while (len) {
277 snprint_line(line, sizeof(line), &data[ofs],
278 min(len, 16U), ofs);
279 printk(KERN_DEBUG "%s\n", line);
280 ofs += 16;
281 len -= min(len, 16U);
282 }
283 }
284
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 size_t out = size;
288 u32 ofs = 0;
289 int total = 0;
290
291 while (size && len) {
292 out = snprint_line(output, size, &data[ofs],
293 min_t(size_t, len, 16U), ofs);
294
295 ofs += 16;
296 output += out;
297 size -= out;
298 len -= min_t(size_t, len, 16U);
299 total += out;
300 }
301 return total;
302 }
303
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 __LINE__, (u32) (b), (u32) (c));
318 _ipw_write_reg8(a, b, c);
319 }
320
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 __LINE__, (u32) (b), (u32) (c));
327 _ipw_write_reg16(a, b, c);
328 }
329
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 __LINE__, (u32) (b), (u32) (c));
336 _ipw_write_reg32(a, b, c);
337 }
338
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 u8 val)
342 {
343 writeb(val, ipw->hw_base + ofs);
344 }
345
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 __LINE__, (u32)(ofs), (u32)(val)); \
350 _ipw_write8(ipw, ofs, val); \
351 } while (0)
352
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 u16 val)
356 {
357 writew(val, ipw->hw_base + ofs);
358 }
359
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 __LINE__, (u32)(ofs), (u32)(val)); \
364 _ipw_write16(ipw, ofs, val); \
365 } while (0)
366
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 u32 val)
370 {
371 writel(val, ipw->hw_base + ofs);
372 }
373
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 __LINE__, (u32)(ofs), (u32)(val)); \
378 _ipw_write32(ipw, ofs, val); \
379 } while (0)
380
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 return readb(ipw->hw_base + ofs);
385 }
386
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 (u32)(ofs)); \
391 _ipw_read8(ipw, ofs); \
392 })
393
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 return readw(ipw->hw_base + ofs);
398 }
399
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 (u32)(ofs)); \
404 _ipw_read16(ipw, ofs); \
405 })
406
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 return readl(ipw->hw_base + ofs);
411 }
412
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 (u32)(ofs)); \
417 _ipw_read32(ipw, ofs); \
418 })
419
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 __LINE__, (u32)(b), (u32)(d)); \
425 _ipw_read_indirect(a, b, c, d); \
426 })
427
428 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 __LINE__, (u32)(b), (u32)(d)); \
434 _ipw_write_indirect(a, b, c, d); \
435 } while (0)
436
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
449 u32 dif_len = reg - aligned_addr;
450
451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
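/* Illustrative example (assuming IPW_INDIRECT_ADDR_MASK dword-aligns the
 * address, i.e. ~0x3): a write to reg 0x00300006 latches aligned_addr
 * 0x00300004 into IPW_INDIRECT_ADDR and then pokes the byte at
 * IPW_INDIRECT_DATA + 2, so only the addressed byte within that dword is
 * touched. */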
455
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
460 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461
462 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 u32 word;
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
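/* Illustrative example for the read path: for reg 0x00300006 the dword at
 * the aligned address is fetched and the expression
 * (word >> ((reg & 0x3) * 8)) & 0xff shifts by 16 bits, extracting byte 2
 * of that dword (assuming the same little-endian byte layout the write
 * helpers above rely on). */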
476
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 u32 value;
481
482 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483
484 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 return value;
488 }
489
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /* for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 int num)
494 {
495 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
496 u32 dif_len = addr - aligned_addr;
497 u32 i;
498
499 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500
501 if (num <= 0) {
502 return;
503 }
504
505 /* Read the first dword (or portion) byte by byte */
506 if (unlikely(dif_len)) {
507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 /* Start reading at aligned_addr + dif_len */
509 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 aligned_addr += 4;
512 }
513
514 /* Read all of the middle dwords as dwords, with auto-increment */
515 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518
519 /* Read the last dword (or portion) byte by byte */
520 if (unlikely(num)) {
521 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 for (i = 0; num > 0; i++, num--)
523 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 }
525 }
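/* Worked example (illustrative): a read of num = 10 bytes from
 * addr = 0x...06 gives dif_len = 2, so bytes 2 and 3 of the first dword are
 * read individually, the next two dwords (8 bytes) are read through the
 * auto-increment port, and no tail bytes remain. An aligned, multiple-of-4
 * request skips the byte-by-byte phases entirely. */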
526
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /* for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 int num)
531 {
532 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
533 u32 dif_len = addr - aligned_addr;
534 u32 i;
535
536 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537
538 if (num <= 0) {
539 return;
540 }
541
542 /* Write the first dword (or portion) byte by byte */
543 if (unlikely(dif_len)) {
544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 /* Start writing at aligned_addr + dif_len */
546 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 aligned_addr += 4;
549 }
550
551 /* Write all of the middle dwords as dwords, with auto-increment */
552 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555
556 /* Write the last dword (or portion) byte by byte */
557 if (unlikely(num)) {
558 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 for (i = 0; num > 0; i++, num--, buf++)
560 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 }
562 }
563
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /* for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 int num)
568 {
569 memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 if (priv->status & STATUS_INT_ENABLED)
587 return;
588 priv->status |= STATUS_INT_ENABLED;
589 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 if (!(priv->status & STATUS_INT_ENABLED))
595 return;
596 priv->status &= ~STATUS_INT_ENABLED;
597 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 unsigned long flags;
603
604 spin_lock_irqsave(&priv->irq_lock, flags);
605 __ipw_enable_interrupts(priv);
606 spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 unsigned long flags;
612
613 spin_lock_irqsave(&priv->irq_lock, flags);
614 __ipw_disable_interrupts(priv);
615 spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617
618 static char *ipw_error_desc(u32 val)
619 {
620 switch (val) {
621 case IPW_FW_ERROR_OK:
622 return "ERROR_OK";
623 case IPW_FW_ERROR_FAIL:
624 return "ERROR_FAIL";
625 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 return "MEMORY_UNDERFLOW";
627 case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 return "MEMORY_OVERFLOW";
629 case IPW_FW_ERROR_BAD_PARAM:
630 return "BAD_PARAM";
631 case IPW_FW_ERROR_BAD_CHECKSUM:
632 return "BAD_CHECKSUM";
633 case IPW_FW_ERROR_NMI_INTERRUPT:
634 return "NMI_INTERRUPT";
635 case IPW_FW_ERROR_BAD_DATABASE:
636 return "BAD_DATABASE";
637 case IPW_FW_ERROR_ALLOC_FAIL:
638 return "ALLOC_FAIL";
639 case IPW_FW_ERROR_DMA_UNDERRUN:
640 return "DMA_UNDERRUN";
641 case IPW_FW_ERROR_DMA_STATUS:
642 return "DMA_STATUS";
643 case IPW_FW_ERROR_DINO_ERROR:
644 return "DINO_ERROR";
645 case IPW_FW_ERROR_EEPROM_ERROR:
646 return "EEPROM_ERROR";
647 case IPW_FW_ERROR_SYSASSERT:
648 return "SYSASSERT";
649 case IPW_FW_ERROR_FATAL_ERROR:
650 return "FATAL_ERROR";
651 default:
652 return "UNKNOWN_ERROR";
653 }
654 }
655
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 struct ipw_fw_error *error)
658 {
659 u32 i;
660
661 if (!error) {
662 IPW_ERROR("Error allocating and capturing error log. "
663 "Nothing to dump.\n");
664 return;
665 }
666
667 IPW_ERROR("Start IPW Error Log Dump:\n");
668 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 error->status, error->config);
670
671 for (i = 0; i < error->elem_len; i++)
672 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
673 ipw_error_desc(error->elem[i].desc),
674 error->elem[i].time,
675 error->elem[i].blink1,
676 error->elem[i].blink2,
677 error->elem[i].link1,
678 error->elem[i].link2, error->elem[i].data);
679 for (i = 0; i < error->log_len; i++)
680 IPW_ERROR("%i\t0x%08x\t%i\n",
681 error->log[i].time,
682 error->log[i].data, error->log[i].event);
683 }
684
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 u32 addr, field_info, field_len, field_count, total_len;
693
694 IPW_DEBUG_ORD("ordinal = %i\n", ord);
695
696 if (!priv || !val || !len) {
697 IPW_DEBUG_ORD("Invalid argument\n");
698 return -EINVAL;
699 }
700
701 /* verify device ordinal tables have been initialized */
702 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 return -EINVAL;
705 }
706
707 switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 case IPW_ORD_TABLE_0_MASK:
709 /*
710 * TABLE 0: Direct access to a table of 32 bit values
711 *
712 * This is a very simple table with the data directly
713 * read from the table
714 */
715
716 /* remove the table id from the ordinal */
717 ord &= IPW_ORD_TABLE_VALUE_MASK;
718
719 /* boundary check */
720 if (ord > priv->table0_len) {
721 IPW_DEBUG_ORD("ordinal value (%i) longer then "
722 "max (%i)\n", ord, priv->table0_len);
723 return -EINVAL;
724 }
725
726 /* verify we have enough room to store the value */
727 if (*len < sizeof(u32)) {
728 IPW_DEBUG_ORD("ordinal buffer length too small, "
729 "need %zd\n", sizeof(u32));
730 return -EINVAL;
731 }
732
733 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 ord, priv->table0_addr + (ord << 2));
735
736 *len = sizeof(u32);
737 ord <<= 2;
738 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 break;
740
741 case IPW_ORD_TABLE_1_MASK:
742 /*
743 * TABLE 1: Indirect access to a table of 32 bit values
744 *
745 * This is a fairly large table of u32 values each
746 * representing starting addr for the data (which is
747 * also a u32)
748 */
749
750 /* remove the table id from the ordinal */
751 ord &= IPW_ORD_TABLE_VALUE_MASK;
752
753 /* boundary check */
754 if (ord > priv->table1_len) {
755 IPW_DEBUG_ORD("ordinal value too long\n");
756 return -EINVAL;
757 }
758
759 /* verify we have enough room to store the value */
760 if (*len < sizeof(u32)) {
761 IPW_DEBUG_ORD("ordinal buffer length too small, "
762 "need %zd\n", sizeof(u32));
763 return -EINVAL;
764 }
765
766 *((u32 *) val) =
767 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 *len = sizeof(u32);
769 break;
770
771 case IPW_ORD_TABLE_2_MASK:
772 /*
773 * TABLE 2: Indirect access to a table of variable sized values
774 *
775 * This table consists of six values, each containing
776 *     - dword containing the starting offset of the data
777 *     - dword containing the length in the first 16 bits
778 *       and the count in the second 16 bits
779 */
780
781 /* remove the table id from the ordinal */
782 ord &= IPW_ORD_TABLE_VALUE_MASK;
783
784 /* boundary check */
785 if (ord > priv->table2_len) {
786 IPW_DEBUG_ORD("ordinal value too long\n");
787 return -EINVAL;
788 }
789
790 /* get the address of statistic */
791 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792
793 /* get the second DW of statistics ;
794 * two 16-bit words - first is length, second is count */
795 field_info =
796 ipw_read_reg32(priv,
797 priv->table2_addr + (ord << 3) +
798 sizeof(u32));
799
800 /* get each entry length */
801 field_len = *((u16 *) & field_info);
802
803 /* get number of entries */
804 field_count = *(((u16 *) & field_info) + 1);
805
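/* Illustrative unpacking (assumes a little-endian host, as the pointer
 * casts above do): a field_info value of 0x000C0006 yields field_len = 6
 * (low 16 bits) and field_count = 12 (high 16 bits), so total_len below
 * becomes 72 bytes. */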
806 /* abort if not enough memory */
807 total_len = field_len * field_count;
808 if (total_len > *len) {
809 *len = total_len;
810 return -EINVAL;
811 }
812
813 *len = total_len;
814 if (!total_len)
815 return 0;
816
817 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 "field_info = 0x%08x\n",
819 addr, total_len, field_info);
820 ipw_read_indirect(priv, addr, val, total_len);
821 break;
822
823 default:
824 IPW_DEBUG_ORD("Invalid ordinal!\n");
825 return -EINVAL;
826
827 }
828
829 return 0;
830 }
831
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 priv->table0_len = ipw_read32(priv, priv->table0_addr);
836
837 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 priv->table0_addr, priv->table0_len);
839
840 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842
843 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 priv->table1_addr, priv->table1_len);
845
846 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 priv->table2_len &= 0x0000ffff; /* use first two bytes */
849
850 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 priv->table2_addr, priv->table2_len);
852
853 }
854
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 reg &= ~IPW_START_STANDBY;
858 if (reg & IPW_GATE_ODMA)
859 reg &= ~IPW_GATE_ODMA;
860 if (reg & IPW_GATE_IDMA)
861 reg &= ~IPW_GATE_IDMA;
862 if (reg & IPW_GATE_ADMA)
863 reg &= ~IPW_GATE_ADMA;
864 return reg;
865 }
866
867 /*
868 * LED behavior:
869  *  - On radio ON, turn on any LEDs that need to be on during startup
870 * - On initialization, start unassociated blink
871 * - On association, disable unassociated blink
872 * - On disassociation, start unassociated blink
873 * - On radio OFF, turn off any LEDs started during radio on
874 *
875 */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 unsigned long flags;
883 u32 led;
884
885 /* If configured to not use LEDs, or nic_type is 1,
886 * then we don't toggle a LINK led */
887 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 return;
889
890 spin_lock_irqsave(&priv->lock, flags);
891
892 if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 !(priv->status & STATUS_LED_LINK_ON)) {
894 IPW_DEBUG_LED("Link LED On\n");
895 led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 led |= priv->led_association_on;
897
898 led = ipw_register_toggle(led);
899
900 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 ipw_write_reg32(priv, IPW_EVENT_REG, led);
902
903 priv->status |= STATUS_LED_LINK_ON;
904
905 /* If we aren't associated, schedule turning the LED off */
906 if (!(priv->status & STATUS_ASSOCIATED))
907 schedule_delayed_work(&priv->led_link_off,
908 LD_TIME_LINK_ON);
909 }
910
911 spin_unlock_irqrestore(&priv->lock, flags);
912 }
913
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 struct ipw_priv *priv =
917 container_of(work, struct ipw_priv, led_link_on.work);
918 mutex_lock(&priv->mutex);
919 ipw_led_link_on(priv);
920 mutex_unlock(&priv->mutex);
921 }
922
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 unsigned long flags;
926 u32 led;
927
928 /* If configured not to use LEDs, or nic type is 1,
929  * then we don't toggle the LINK led. */
930 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 return;
932
933 spin_lock_irqsave(&priv->lock, flags);
934
935 if (priv->status & STATUS_LED_LINK_ON) {
936 led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 led &= priv->led_association_off;
938 led = ipw_register_toggle(led);
939
940 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 ipw_write_reg32(priv, IPW_EVENT_REG, led);
942
943 IPW_DEBUG_LED("Link LED Off\n");
944
945 priv->status &= ~STATUS_LED_LINK_ON;
946
947 /* If we aren't associated and the radio is on, schedule
948 * turning the LED on (blink while unassociated) */
949 if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 !(priv->status & STATUS_ASSOCIATED))
951 schedule_delayed_work(&priv->led_link_on,
952 LD_TIME_LINK_OFF);
953
954 }
955
956 spin_unlock_irqrestore(&priv->lock, flags);
957 }
958
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 struct ipw_priv *priv =
962 container_of(work, struct ipw_priv, led_link_off.work);
963 mutex_lock(&priv->mutex);
964 ipw_led_link_off(priv);
965 mutex_unlock(&priv->mutex);
966 }
967
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 u32 led;
971
972 if (priv->config & CFG_NO_LED)
973 return;
974
975 if (priv->status & STATUS_RF_KILL_MASK)
976 return;
977
978 if (!(priv->status & STATUS_LED_ACT_ON)) {
979 led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 led |= priv->led_activity_on;
981
982 led = ipw_register_toggle(led);
983
984 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 ipw_write_reg32(priv, IPW_EVENT_REG, led);
986
987 IPW_DEBUG_LED("Activity LED On\n");
988
989 priv->status |= STATUS_LED_ACT_ON;
990
991 cancel_delayed_work(&priv->led_act_off);
992 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 } else {
994 /* Reschedule LED off for full time period */
995 cancel_delayed_work(&priv->led_act_off);
996 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 }
998 }
999
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 unsigned long flags;
1004 spin_lock_irqsave(&priv->lock, flags);
1005 __ipw_led_activity_on(priv);
1006 spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif /* 0 */
1009
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 unsigned long flags;
1013 u32 led;
1014
1015 if (priv->config & CFG_NO_LED)
1016 return;
1017
1018 spin_lock_irqsave(&priv->lock, flags);
1019
1020 if (priv->status & STATUS_LED_ACT_ON) {
1021 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 led &= priv->led_activity_off;
1023
1024 led = ipw_register_toggle(led);
1025
1026 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028
1029 IPW_DEBUG_LED("Activity LED Off\n");
1030
1031 priv->status &= ~STATUS_LED_ACT_ON;
1032 }
1033
1034 spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 struct ipw_priv *priv =
1040 container_of(work, struct ipw_priv, led_act_off.work);
1041 mutex_lock(&priv->mutex);
1042 ipw_led_activity_off(priv);
1043 mutex_unlock(&priv->mutex);
1044 }
1045
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 unsigned long flags;
1049 u32 led;
1050
1051 /* Only nic type 1 supports mode LEDs */
1052 if (priv->config & CFG_NO_LED ||
1053 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 return;
1055
1056 spin_lock_irqsave(&priv->lock, flags);
1057
1058 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 if (priv->assoc_network->mode == IEEE_A) {
1060 led |= priv->led_ofdm_on;
1061 led &= priv->led_association_off;
1062 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 } else if (priv->assoc_network->mode == IEEE_G) {
1064 led |= priv->led_ofdm_on;
1065 led |= priv->led_association_on;
1066 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 } else {
1068 led &= priv->led_ofdm_off;
1069 led |= priv->led_association_on;
1070 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 }
1072
1073 led = ipw_register_toggle(led);
1074
1075 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077
1078 spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 unsigned long flags;
1084 u32 led;
1085
1086 /* Only nic type 1 supports mode LEDs */
1087 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 return;
1089
1090 spin_lock_irqsave(&priv->lock, flags);
1091
1092 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 led &= priv->led_ofdm_off;
1094 led &= priv->led_association_off;
1095
1096 led = ipw_register_toggle(led);
1097
1098 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100
1101 spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 ipw_led_link_on(priv);
1107 }
1108
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 ipw_led_activity_off(priv);
1112 ipw_led_link_off(priv);
1113 }
1114
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 /* Set the Link Led on for all nic types */
1118 ipw_led_link_on(priv);
1119 }
1120
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 ipw_led_activity_off(priv);
1124 ipw_led_link_off(priv);
1125
1126 if (priv->status & STATUS_RF_KILL_MASK)
1127 ipw_led_radio_off(priv);
1128 }
1129
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133
1134 /* Set the default PINs for the link and activity leds */
1135 priv->led_activity_on = IPW_ACTIVITY_LED;
1136 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137
1138 priv->led_association_on = IPW_ASSOCIATED_LED;
1139 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140
1141 /* Set the default PINs for the OFDM leds */
1142 priv->led_ofdm_on = IPW_OFDM_LED;
1143 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144
1145 switch (priv->nic_type) {
1146 case EEPROM_NIC_TYPE_1:
1147 /* In this NIC type, the LEDs are reversed.... */
1148 priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 priv->led_association_on = IPW_ACTIVITY_LED;
1151 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152
1153 if (!(priv->config & CFG_NO_LED))
1154 ipw_led_band_on(priv);
1155
1156 /* And we don't blink link LEDs for this nic, so
1157 * just return here */
1158 return;
1159
1160 case EEPROM_NIC_TYPE_3:
1161 case EEPROM_NIC_TYPE_2:
1162 case EEPROM_NIC_TYPE_4:
1163 case EEPROM_NIC_TYPE_0:
1164 break;
1165
1166 default:
1167 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 priv->nic_type);
1169 priv->nic_type = EEPROM_NIC_TYPE_0;
1170 break;
1171 }
1172
1173 if (!(priv->config & CFG_NO_LED)) {
1174 if (priv->status & STATUS_ASSOCIATED)
1175 ipw_led_link_on(priv);
1176 else
1177 ipw_led_link_off(priv);
1178 }
1179 }
1180
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 ipw_led_activity_off(priv);
1184 ipw_led_link_off(priv);
1185 ipw_led_band_off(priv);
1186 cancel_delayed_work(&priv->led_link_on);
1187 cancel_delayed_work(&priv->led_link_off);
1188 cancel_delayed_work(&priv->led_act_off);
1189 }
1190
1191 /*
1192 * The following adds a new attribute to the sysfs representation
1193  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw2200/)
1194 * used for controlling the debug level.
1195 *
1196 * See the level definitions in ipw for details.
1197 */
1198 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199 {
1200 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202
1203 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204 size_t count)
1205 {
1206 char *p = (char *)buf;
1207 u32 val;
1208
1209 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 p++;
1211 if (p[0] == 'x' || p[0] == 'X')
1212 p++;
1213 val = simple_strtoul(p, &p, 16);
1214 } else
1215 val = simple_strtoul(p, &p, 10);
1216 if (p == buf)
1217 printk(KERN_INFO DRV_NAME
1218 ": %s is not in hex or decimal form.\n", buf);
1219 else
1220 ipw_debug_level = val;
1221
1222 return strnlen(buf, count);
1223 }
1224
1225 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226 show_debug_level, store_debug_level);
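/* Usage sketch (illustrative; assumes the driver registers under
 * /sys/bus/pci/drivers/ipw2200/): the attribute accepts hex or decimal,
 * e.g.
 *   echo 0x43fff > /sys/bus/pci/drivers/ipw2200/debug_level
 * and reads back as a zero-padded hex mask via show_debug_level(). */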
1227
1228 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229 {
1230 /* length = 1st dword in log */
1231 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232 }
1233
1234 static void ipw_capture_event_log(struct ipw_priv *priv,
1235 u32 log_len, struct ipw_event *log)
1236 {
1237 u32 base;
1238
1239 if (log_len) {
1240 base = ipw_read32(priv, IPW_EVENT_LOG);
1241 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242 (u8 *) log, sizeof(*log) * log_len);
1243 }
1244 }
1245
1246 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247 {
1248 struct ipw_fw_error *error;
1249 u32 log_len = ipw_get_event_log_len(priv);
1250 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251 u32 elem_len = ipw_read_reg32(priv, base);
1252
1253 error = kmalloc(sizeof(*error) +
1254 sizeof(*error->elem) * elem_len +
1255 sizeof(*error->log) * log_len, GFP_ATOMIC);
1256 if (!error) {
1257 IPW_ERROR("Memory allocation for firmware error log "
1258 "failed.\n");
1259 return NULL;
1260 }
1261 error->jiffies = jiffies;
1262 error->status = priv->status;
1263 error->config = priv->config;
1264 error->elem_len = elem_len;
1265 error->log_len = log_len;
1266 error->elem = (struct ipw_error_elem *)error->payload;
1267 error->log = (struct ipw_event *)(error->elem + elem_len);
1268
1269 ipw_capture_event_log(priv, log_len, error->log);
1270
1271 if (elem_len)
1272 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273 sizeof(*error->elem) * elem_len);
1274
1275 return error;
1276 }
1277
1278 static ssize_t show_event_log(struct device *d,
1279 struct device_attribute *attr, char *buf)
1280 {
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 u32 log_len = ipw_get_event_log_len(priv);
1283 u32 log_size;
1284 struct ipw_event *log;
1285 u32 len = 0, i;
1286
1287 /* not using min() because of its strict type checking */
1288 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289 sizeof(*log) * log_len : PAGE_SIZE;
1290 log = kzalloc(log_size, GFP_KERNEL);
1291 if (!log) {
1292 IPW_ERROR("Unable to allocate memory for log\n");
1293 return 0;
1294 }
1295 log_len = log_size / sizeof(*log);
1296 ipw_capture_event_log(priv, log_len, log);
1297
1298 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299 for (i = 0; i < log_len; i++)
1300 len += snprintf(buf + len, PAGE_SIZE - len,
1301 "\n%08X%08X%08X",
1302 log[i].time, log[i].event, log[i].data);
1303 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304 kfree(log);
1305 return len;
1306 }
1307
1308 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1309
1310 static ssize_t show_error(struct device *d,
1311 struct device_attribute *attr, char *buf)
1312 {
1313 struct ipw_priv *priv = dev_get_drvdata(d);
1314 u32 len = 0, i;
1315 if (!priv->error)
1316 return 0;
1317 len += snprintf(buf + len, PAGE_SIZE - len,
1318 "%08lX%08X%08X%08X",
1319 priv->error->jiffies,
1320 priv->error->status,
1321 priv->error->config, priv->error->elem_len);
1322 for (i = 0; i < priv->error->elem_len; i++)
1323 len += snprintf(buf + len, PAGE_SIZE - len,
1324 "\n%08X%08X%08X%08X%08X%08X%08X",
1325 priv->error->elem[i].time,
1326 priv->error->elem[i].desc,
1327 priv->error->elem[i].blink1,
1328 priv->error->elem[i].blink2,
1329 priv->error->elem[i].link1,
1330 priv->error->elem[i].link2,
1331 priv->error->elem[i].data);
1332
1333 len += snprintf(buf + len, PAGE_SIZE - len,
1334 "\n%08X", priv->error->log_len);
1335 for (i = 0; i < priv->error->log_len; i++)
1336 len += snprintf(buf + len, PAGE_SIZE - len,
1337 "\n%08X%08X%08X",
1338 priv->error->log[i].time,
1339 priv->error->log[i].event,
1340 priv->error->log[i].data);
1341 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342 return len;
1343 }
1344
1345 static ssize_t clear_error(struct device *d,
1346 struct device_attribute *attr,
1347 const char *buf, size_t count)
1348 {
1349 struct ipw_priv *priv = dev_get_drvdata(d);
1350
1351 kfree(priv->error);
1352 priv->error = NULL;
1353 return count;
1354 }
1355
1356 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357
1358 static ssize_t show_cmd_log(struct device *d,
1359 struct device_attribute *attr, char *buf)
1360 {
1361 struct ipw_priv *priv = dev_get_drvdata(d);
1362 u32 len = 0, i;
1363 if (!priv->cmdlog)
1364 return 0;
1365 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1367 i = (i + 1) % priv->cmdlog_len) {
1368 len +=
1369 snprintf(buf + len, PAGE_SIZE - len,
1370 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372 priv->cmdlog[i].cmd.len);
1373 len +=
1374 snprintk_buf(buf + len, PAGE_SIZE - len,
1375 (u8 *) priv->cmdlog[i].cmd.param,
1376 priv->cmdlog[i].cmd.len);
1377 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 }
1379 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380 return len;
1381 }
1382
1383 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384
1385 #ifdef CONFIG_IPW2200_PROMISCUOUS
1386 static void ipw_prom_free(struct ipw_priv *priv);
1387 static int ipw_prom_alloc(struct ipw_priv *priv);
1388 static ssize_t store_rtap_iface(struct device *d,
1389 struct device_attribute *attr,
1390 const char *buf, size_t count)
1391 {
1392 struct ipw_priv *priv = dev_get_drvdata(d);
1393 int rc = 0;
1394
1395 if (count < 1)
1396 return -EINVAL;
1397
1398 switch (buf[0]) {
1399 case '0':
1400 if (!rtap_iface)
1401 return count;
1402
1403 if (netif_running(priv->prom_net_dev)) {
1404 IPW_WARNING("Interface is up. Cannot unregister.\n");
1405 return count;
1406 }
1407
1408 ipw_prom_free(priv);
1409 rtap_iface = 0;
1410 break;
1411
1412 case '1':
1413 if (rtap_iface)
1414 return count;
1415
1416 rc = ipw_prom_alloc(priv);
1417 if (!rc)
1418 rtap_iface = 1;
1419 break;
1420
1421 default:
1422 return -EINVAL;
1423 }
1424
1425 if (rc) {
1426 IPW_ERROR("Failed to register promiscuous network "
1427 "device (error %d).\n", rc);
1428 }
1429
1430 return count;
1431 }
1432
1433 static ssize_t show_rtap_iface(struct device *d,
1434 struct device_attribute *attr,
1435 char *buf)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 if (rtap_iface)
1439 return sprintf(buf, "%s", priv->prom_net_dev->name);
1440 else {
1441 buf[0] = '-';
1442 buf[1] = '1';
1443 buf[2] = '\0';
1444 return 3;
1445 }
1446 }
1447
1448 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449 store_rtap_iface);
1450
1451 static ssize_t store_rtap_filter(struct device *d,
1452 struct device_attribute *attr,
1453 const char *buf, size_t count)
1454 {
1455 struct ipw_priv *priv = dev_get_drvdata(d);
1456
1457 if (!priv->prom_priv) {
1458 IPW_ERROR("Attempting to set filter without "
1459 "rtap_iface enabled.\n");
1460 return -EPERM;
1461 }
1462
1463 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464
1465 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466 BIT_ARG16(priv->prom_priv->filter));
1467
1468 return count;
1469 }
1470
1471 static ssize_t show_rtap_filter(struct device *d,
1472 struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "0x%04X",
1477 priv->prom_priv ? priv->prom_priv->filter : 0);
1478 }
1479
1480 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481 store_rtap_filter);
1482 #endif
1483
1484 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485 char *buf)
1486 {
1487 struct ipw_priv *priv = dev_get_drvdata(d);
1488 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489 }
1490
1491 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492 const char *buf, size_t count)
1493 {
1494 struct ipw_priv *priv = dev_get_drvdata(d);
1495 struct net_device *dev = priv->net_dev;
1496 char buffer[] = "00000000";
1497 unsigned long len =
1498 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499 unsigned long val;
1500 char *p = buffer;
1501
1502 IPW_DEBUG_INFO("enter\n");
1503
1504 strncpy(buffer, buf, len);
1505 buffer[len] = 0;
1506
1507 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508 p++;
1509 if (p[0] == 'x' || p[0] == 'X')
1510 p++;
1511 val = simple_strtoul(p, &p, 16);
1512 } else
1513 val = simple_strtoul(p, &p, 10);
1514 if (p == buffer) {
1515 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516 } else {
1517 priv->ieee->scan_age = val;
1518 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519 }
1520
1521 IPW_DEBUG_INFO("exit\n");
1522 return len;
1523 }
1524
1525 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526
1527 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528 char *buf)
1529 {
1530 struct ipw_priv *priv = dev_get_drvdata(d);
1531 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532 }
1533
1534 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535 const char *buf, size_t count)
1536 {
1537 struct ipw_priv *priv = dev_get_drvdata(d);
1538
1539 IPW_DEBUG_INFO("enter\n");
1540
1541 if (count == 0)
1542 return 0;
1543
1544 if (*buf == 0) {
1545 IPW_DEBUG_LED("Disabling LED control.\n");
1546 priv->config |= CFG_NO_LED;
1547 ipw_led_shutdown(priv);
1548 } else {
1549 IPW_DEBUG_LED("Enabling LED control.\n");
1550 priv->config &= ~CFG_NO_LED;
1551 ipw_led_init(priv);
1552 }
1553
1554 IPW_DEBUG_INFO("exit\n");
1555 return count;
1556 }
1557
1558 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559
1560 static ssize_t show_status(struct device *d,
1561 struct device_attribute *attr, char *buf)
1562 {
1563 struct ipw_priv *p = dev_get_drvdata(d);
1564 return sprintf(buf, "0x%08x\n", (int)p->status);
1565 }
1566
1567 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568
1569 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570 char *buf)
1571 {
1572 struct ipw_priv *p = dev_get_drvdata(d);
1573 return sprintf(buf, "0x%08x\n", (int)p->config);
1574 }
1575
1576 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577
1578 static ssize_t show_nic_type(struct device *d,
1579 struct device_attribute *attr, char *buf)
1580 {
1581 struct ipw_priv *priv = dev_get_drvdata(d);
1582 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583 }
1584
1585 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586
1587 static ssize_t show_ucode_version(struct device *d,
1588 struct device_attribute *attr, char *buf)
1589 {
1590 u32 len = sizeof(u32), tmp = 0;
1591 struct ipw_priv *p = dev_get_drvdata(d);
1592
1593 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594 return 0;
1595
1596 return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598
1599 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600
1601 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 u32 len = sizeof(u32), tmp = 0;
1605 struct ipw_priv *p = dev_get_drvdata(d);
1606
1607 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608 return 0;
1609
1610 return sprintf(buf, "0x%08x\n", tmp);
1611 }
1612
1613 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614
1615 /*
1616 * Add a device attribute to view/control the delay between eeprom
1617 * operations.
1618 */
1619 static ssize_t show_eeprom_delay(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621 {
1622 struct ipw_priv *p = dev_get_drvdata(d);
1623 int n = p->eeprom_delay;
1624 return sprintf(buf, "%i\n", n);
1625 }
1626 static ssize_t store_eeprom_delay(struct device *d,
1627 struct device_attribute *attr,
1628 const char *buf, size_t count)
1629 {
1630 struct ipw_priv *p = dev_get_drvdata(d);
1631 sscanf(buf, "%i", &p->eeprom_delay);
1632 return strnlen(buf, count);
1633 }
1634
1635 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636 show_eeprom_delay, store_eeprom_delay);
1637
1638 static ssize_t show_command_event_reg(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640 {
1641 u32 reg = 0;
1642 struct ipw_priv *p = dev_get_drvdata(d);
1643
1644 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645 return sprintf(buf, "0x%08x\n", reg);
1646 }
1647 static ssize_t store_command_event_reg(struct device *d,
1648 struct device_attribute *attr,
1649 const char *buf, size_t count)
1650 {
1651 u32 reg;
1652 struct ipw_priv *p = dev_get_drvdata(d);
1653
1654 sscanf(buf, "%x", &reg);
1655 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656 return strnlen(buf, count);
1657 }
1658
1659 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660 show_command_event_reg, store_command_event_reg);
1661
1662 static ssize_t show_mem_gpio_reg(struct device *d,
1663 struct device_attribute *attr, char *buf)
1664 {
1665 u32 reg = 0;
1666 struct ipw_priv *p = dev_get_drvdata(d);
1667
1668 reg = ipw_read_reg32(p, 0x301100);
1669 return sprintf(buf, "0x%08x\n", reg);
1670 }
1671 static ssize_t store_mem_gpio_reg(struct device *d,
1672 struct device_attribute *attr,
1673 const char *buf, size_t count)
1674 {
1675 u32 reg;
1676 struct ipw_priv *p = dev_get_drvdata(d);
1677
1678 sscanf(buf, "%x", &reg);
1679 ipw_write_reg32(p, 0x301100, reg);
1680 return strnlen(buf, count);
1681 }
1682
1683 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684 show_mem_gpio_reg, store_mem_gpio_reg);
1685
1686 static ssize_t show_indirect_dword(struct device *d,
1687 struct device_attribute *attr, char *buf)
1688 {
1689 u32 reg = 0;
1690 struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692 if (priv->status & STATUS_INDIRECT_DWORD)
1693 reg = ipw_read_reg32(priv, priv->indirect_dword);
1694 else
1695 reg = 0;
1696
1697 return sprintf(buf, "0x%08x\n", reg);
1698 }
1699 static ssize_t store_indirect_dword(struct device *d,
1700 struct device_attribute *attr,
1701 const char *buf, size_t count)
1702 {
1703 struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705 sscanf(buf, "%x", &priv->indirect_dword);
1706 priv->status |= STATUS_INDIRECT_DWORD;
1707 return strnlen(buf, count);
1708 }
1709
1710 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711 show_indirect_dword, store_indirect_dword);
1712
1713 static ssize_t show_indirect_byte(struct device *d,
1714 struct device_attribute *attr, char *buf)
1715 {
1716 u8 reg = 0;
1717 struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719 if (priv->status & STATUS_INDIRECT_BYTE)
1720 reg = ipw_read_reg8(priv, priv->indirect_byte);
1721 else
1722 reg = 0;
1723
1724 return sprintf(buf, "0x%02x\n", reg);
1725 }
1726 static ssize_t store_indirect_byte(struct device *d,
1727 struct device_attribute *attr,
1728 const char *buf, size_t count)
1729 {
1730 struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732 sscanf(buf, "%x", &priv->indirect_byte);
1733 priv->status |= STATUS_INDIRECT_BYTE;
1734 return strnlen(buf, count);
1735 }
1736
1737 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738 show_indirect_byte, store_indirect_byte);
1739
1740 static ssize_t show_direct_dword(struct device *d,
1741 struct device_attribute *attr, char *buf)
1742 {
1743 u32 reg = 0;
1744 struct ipw_priv *priv = dev_get_drvdata(d);
1745
1746 if (priv->status & STATUS_DIRECT_DWORD)
1747 reg = ipw_read32(priv, priv->direct_dword);
1748 else
1749 reg = 0;
1750
1751 return sprintf(buf, "0x%08x\n", reg);
1752 }
1753 static ssize_t store_direct_dword(struct device *d,
1754 struct device_attribute *attr,
1755 const char *buf, size_t count)
1756 {
1757 struct ipw_priv *priv = dev_get_drvdata(d);
1758
1759 sscanf(buf, "%x", &priv->direct_dword);
1760 priv->status |= STATUS_DIRECT_DWORD;
1761 return strnlen(buf, count);
1762 }
1763
1764 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765 show_direct_dword, store_direct_dword);
1766
1767 static int rf_kill_active(struct ipw_priv *priv)
1768 {
1769 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770 priv->status |= STATUS_RF_KILL_HW;
1771 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772 } else {
1773 priv->status &= ~STATUS_RF_KILL_HW;
1774 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775 }
1776
1777 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778 }
1779
1780 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781 char *buf)
1782 {
1783 /* 0 - RF kill not enabled
1784 1 - SW based RF kill active (sysfs)
1785 2 - HW based RF kill active
1786 	   3 - Both HW and SW based RF kill active */
1787 struct ipw_priv *priv = dev_get_drvdata(d);
1788 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789 (rf_kill_active(priv) ? 0x2 : 0x0);
1790 return sprintf(buf, "%i\n", val);
1791 }
1792
1793 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794 {
1795 if ((disable_radio ? 1 : 0) ==
1796 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797 return 0;
1798
1799 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1800 disable_radio ? "OFF" : "ON");
1801
1802 if (disable_radio) {
1803 priv->status |= STATUS_RF_KILL_SW;
1804
1805 cancel_delayed_work(&priv->request_scan);
1806 cancel_delayed_work(&priv->request_direct_scan);
1807 cancel_delayed_work(&priv->request_passive_scan);
1808 cancel_delayed_work(&priv->scan_event);
1809 schedule_work(&priv->down);
1810 } else {
1811 priv->status &= ~STATUS_RF_KILL_SW;
1812 if (rf_kill_active(priv)) {
1813 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814 "disabled by HW switch\n");
1815 /* Make sure the RF_KILL check timer is running */
1816 cancel_delayed_work(&priv->rf_kill);
1817 schedule_delayed_work(&priv->rf_kill,
1818 round_jiffies_relative(2 * HZ));
1819 } else
1820 schedule_work(&priv->up);
1821 }
1822
1823 return 1;
1824 }
1825
1826 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827 const char *buf, size_t count)
1828 {
1829 struct ipw_priv *priv = dev_get_drvdata(d);
1830
1831 ipw_radio_kill_sw(priv, buf[0] == '1');
1832
1833 return count;
1834 }
1835
1836 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
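/* Usage sketch (illustrative): writing '1' to the rf_kill attribute asserts
 * the software kill (ipw_radio_kill_sw() schedules priv->down); writing '0'
 * clears it and either brings the interface back up or, if the hardware
 * switch is still engaged, re-arms the rf_kill poll. Reading returns the
 * 0-3 encoding documented in show_rf_kill(). */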
1837
1838 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839 char *buf)
1840 {
1841 struct ipw_priv *priv = dev_get_drvdata(d);
1842 int pos = 0, len = 0;
1843 if (priv->config & CFG_SPEED_SCAN) {
1844 while (priv->speed_scan[pos] != 0)
1845 len += sprintf(&buf[len], "%d ",
1846 priv->speed_scan[pos++]);
1847 return len + sprintf(&buf[len], "\n");
1848 }
1849
1850 return sprintf(buf, "0\n");
1851 }
1852
1853 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854 const char *buf, size_t count)
1855 {
1856 struct ipw_priv *priv = dev_get_drvdata(d);
1857 int channel, pos = 0;
1858 const char *p = buf;
1859
1860 /* list of space separated channels to scan, optionally ending with 0 */
1861 while ((channel = simple_strtol(p, NULL, 0))) {
1862 if (pos == MAX_SPEED_SCAN - 1) {
1863 priv->speed_scan[pos] = 0;
1864 break;
1865 }
1866
1867 if (libipw_is_valid_channel(priv->ieee, channel))
1868 priv->speed_scan[pos++] = channel;
1869 else
1870 IPW_WARNING("Skipping invalid channel request: %d\n",
1871 channel);
1872 p = strchr(p, ' ');
1873 if (!p)
1874 break;
1875 while (*p == ' ' || *p == '\t')
1876 p++;
1877 }
1878
1879 if (pos == 0)
1880 priv->config &= ~CFG_SPEED_SCAN;
1881 else {
1882 priv->speed_scan_pos = 0;
1883 priv->config |= CFG_SPEED_SCAN;
1884 }
1885
1886 return count;
1887 }
1888
1889 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890 store_speed_scan);
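/* Illustrative sysfs usage (the path and channel numbers are examples only):
 *   echo "1 6 11" > /sys/.../speed_scan  # restrict scans to channels 1, 6 and 11
 *   echo 0 > /sys/.../speed_scan         # clear the list and disable CFG_SPEED_SCAN
 *   cat /sys/.../speed_scan              # prints the current list, or "0" if disabled
 */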
1891
1892 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893 char *buf)
1894 {
1895 struct ipw_priv *priv = dev_get_drvdata(d);
1896 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897 }
1898
1899 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900 const char *buf, size_t count)
1901 {
1902 struct ipw_priv *priv = dev_get_drvdata(d);
1903 if (buf[0] == '1')
1904 priv->config |= CFG_NET_STATS;
1905 else
1906 priv->config &= ~CFG_NET_STATS;
1907
1908 return count;
1909 }
1910
1911 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912 show_net_stats, store_net_stats);
1913
1914 static ssize_t show_channels(struct device *d,
1915 struct device_attribute *attr,
1916 char *buf)
1917 {
1918 struct ipw_priv *priv = dev_get_drvdata(d);
1919 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920 int len = 0, i;
1921
1922 len = sprintf(&buf[len],
1923 "Displaying %d channels in 2.4Ghz band "
1924 "(802.11bg):\n", geo->bg_channels);
1925
1926 for (i = 0; i < geo->bg_channels; i++) {
1927 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928 geo->bg[i].channel,
1929 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930 " (radar spectrum)" : "",
1931 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933 ? "" : ", IBSS",
1934 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935 "passive only" : "active/passive",
1936 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937 "B" : "B/G");
1938 }
1939
1940 len += sprintf(&buf[len],
1941 "Displaying %d channels in 5.2Ghz band "
1942 "(802.11a):\n", geo->a_channels);
1943 for (i = 0; i < geo->a_channels; i++) {
1944 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945 geo->a[i].channel,
1946 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947 " (radar spectrum)" : "",
1948 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950 ? "" : ", IBSS",
1951 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952 "passive only" : "active/passive");
1953 }
1954
1955 return len;
1956 }
1957
1958 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959
1960 static void notify_wx_assoc_event(struct ipw_priv *priv)
1961 {
1962 union iwreq_data wrqu;
1963 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964 if (priv->status & STATUS_ASSOCIATED)
1965 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966 else
1967 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1968 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969 }
1970
1971 static void ipw_irq_tasklet(struct ipw_priv *priv)
1972 {
1973 u32 inta, inta_mask, handled = 0;
1974 unsigned long flags;
1975 int rc = 0;
1976
1977 spin_lock_irqsave(&priv->irq_lock, flags);
1978
1979 inta = ipw_read32(priv, IPW_INTA_RW);
1980 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1981
1982 if (inta == 0xFFFFFFFF) {
1983 /* Hardware disappeared */
1984 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1985 /* Only handle the cached INTA values */
1986 inta = 0;
1987 }
1988 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1989
1990 /* Add any cached INTA values that need to be handled */
1991 inta |= priv->isr_inta;
1992
1993 spin_unlock_irqrestore(&priv->irq_lock, flags);
1994
1995 spin_lock_irqsave(&priv->lock, flags);
1996
1997 /* handle each of the reasons the interrupt was raised */
1998 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1999 ipw_rx(priv);
2000 handled |= IPW_INTA_BIT_RX_TRANSFER;
2001 }
2002
2003 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2004 IPW_DEBUG_HC("Command completed.\n");
2005 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2006 priv->status &= ~STATUS_HCMD_ACTIVE;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2009 }
2010
2011 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2012 IPW_DEBUG_TX("TX_QUEUE_1\n");
2013 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2014 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2015 }
2016
2017 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2018 IPW_DEBUG_TX("TX_QUEUE_2\n");
2019 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2020 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2021 }
2022
2023 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2024 IPW_DEBUG_TX("TX_QUEUE_3\n");
2025 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2026 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2027 }
2028
2029 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2030 IPW_DEBUG_TX("TX_QUEUE_4\n");
2031 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2032 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2033 }
2034
2035 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2036 IPW_WARNING("STATUS_CHANGE\n");
2037 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2041 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2042 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2043 }
2044
2045 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2046 IPW_WARNING("HOST_CMD_DONE\n");
2047 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2048 }
2049
2050 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2051 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2052 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2053 }
2054
2055 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2056 IPW_WARNING("PHY_OFF_DONE\n");
2057 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2058 }
2059
2060 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2061 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2062 priv->status |= STATUS_RF_KILL_HW;
2063 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2064 wake_up_interruptible(&priv->wait_command_queue);
2065 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2066 cancel_delayed_work(&priv->request_scan);
2067 cancel_delayed_work(&priv->request_direct_scan);
2068 cancel_delayed_work(&priv->request_passive_scan);
2069 cancel_delayed_work(&priv->scan_event);
2070 schedule_work(&priv->link_down);
2071 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2072 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2073 }
2074
2075 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2076 IPW_WARNING("Firmware error detected. Restarting.\n");
2077 if (priv->error) {
2078 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2079 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2080 struct ipw_fw_error *error =
2081 ipw_alloc_error_log(priv);
2082 ipw_dump_error_log(priv, error);
2083 kfree(error);
2084 }
2085 } else {
2086 priv->error = ipw_alloc_error_log(priv);
2087 if (priv->error)
2088 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2089 else
2090 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2091 "log.\n");
2092 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2093 ipw_dump_error_log(priv, priv->error);
2094 }
2095
2096 /* XXX: If hardware encryption is for WPA/WPA2,
2097 * we have to notify the supplicant. */
2098 if (priv->ieee->sec.encrypt) {
2099 priv->status &= ~STATUS_ASSOCIATED;
2100 notify_wx_assoc_event(priv);
2101 }
2102
2103 /* Keep the restart process from trying to send host
2104 * commands by clearing the INIT status bit */
2105 priv->status &= ~STATUS_INIT;
2106
2107 /* Cancel currently queued command. */
2108 priv->status &= ~STATUS_HCMD_ACTIVE;
2109 wake_up_interruptible(&priv->wait_command_queue);
2110
2111 schedule_work(&priv->adapter_restart);
2112 handled |= IPW_INTA_BIT_FATAL_ERROR;
2113 }
2114
2115 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2116 IPW_ERROR("Parity error\n");
2117 handled |= IPW_INTA_BIT_PARITY_ERROR;
2118 }
2119
2120 if (handled != inta) {
2121 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2122 }
2123
2124 spin_unlock_irqrestore(&priv->lock, flags);
2125
2126 /* enable all interrupts */
2127 ipw_enable_interrupts(priv);
2128 }
2129
2130 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
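/* For example, IPW_CMD(SSID) below expands (via the ## and # operators) to:
 *   case IPW_CMD_SSID : return "SSID";
 */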
2131 static char *get_cmd_string(u8 cmd)
2132 {
2133 switch (cmd) {
2134 IPW_CMD(HOST_COMPLETE);
2135 IPW_CMD(POWER_DOWN);
2136 IPW_CMD(SYSTEM_CONFIG);
2137 IPW_CMD(MULTICAST_ADDRESS);
2138 IPW_CMD(SSID);
2139 IPW_CMD(ADAPTER_ADDRESS);
2140 IPW_CMD(PORT_TYPE);
2141 IPW_CMD(RTS_THRESHOLD);
2142 IPW_CMD(FRAG_THRESHOLD);
2143 IPW_CMD(POWER_MODE);
2144 IPW_CMD(WEP_KEY);
2145 IPW_CMD(TGI_TX_KEY);
2146 IPW_CMD(SCAN_REQUEST);
2147 IPW_CMD(SCAN_REQUEST_EXT);
2148 IPW_CMD(ASSOCIATE);
2149 IPW_CMD(SUPPORTED_RATES);
2150 IPW_CMD(SCAN_ABORT);
2151 IPW_CMD(TX_FLUSH);
2152 IPW_CMD(QOS_PARAMETERS);
2153 IPW_CMD(DINO_CONFIG);
2154 IPW_CMD(RSN_CAPABILITIES);
2155 IPW_CMD(RX_KEY);
2156 IPW_CMD(CARD_DISABLE);
2157 IPW_CMD(SEED_NUMBER);
2158 IPW_CMD(TX_POWER);
2159 IPW_CMD(COUNTRY_INFO);
2160 IPW_CMD(AIRONET_INFO);
2161 IPW_CMD(AP_TX_POWER);
2162 IPW_CMD(CCKM_INFO);
2163 IPW_CMD(CCX_VER_INFO);
2164 IPW_CMD(SET_CALIBRATION);
2165 IPW_CMD(SENSITIVITY_CALIB);
2166 IPW_CMD(RETRY_LIMIT);
2167 IPW_CMD(IPW_PRE_POWER_DOWN);
2168 IPW_CMD(VAP_BEACON_TEMPLATE);
2169 IPW_CMD(VAP_DTIM_PERIOD);
2170 IPW_CMD(EXT_SUPPORTED_RATES);
2171 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2172 IPW_CMD(VAP_QUIET_INTERVALS);
2173 IPW_CMD(VAP_CHANNEL_SWITCH);
2174 IPW_CMD(VAP_MANDATORY_CHANNELS);
2175 IPW_CMD(VAP_CELL_PWR_LIMIT);
2176 IPW_CMD(VAP_CF_PARAM_SET);
2177 IPW_CMD(VAP_SET_BEACONING_STATE);
2178 IPW_CMD(MEASUREMENT);
2179 IPW_CMD(POWER_CAPABILITY);
2180 IPW_CMD(SUPPORTED_CHANNELS);
2181 IPW_CMD(TPC_REPORT);
2182 IPW_CMD(WME_INFO);
2183 IPW_CMD(PRODUCTION_COMMAND);
2184 default:
2185 return "UNKNOWN";
2186 }
2187 }
2188
2189 #define HOST_COMPLETE_TIMEOUT HZ
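/* HZ jiffies correspond to one second, so a host command is given roughly
 * one second to complete before __ipw_send_cmd() below reports a timeout. */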
2190
2191 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2192 {
2193 int rc = 0;
2194 unsigned long flags;
2195 unsigned long now, end;
2196
2197 spin_lock_irqsave(&priv->lock, flags);
2198 if (priv->status & STATUS_HCMD_ACTIVE) {
2199 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2200 get_cmd_string(cmd->cmd));
2201 spin_unlock_irqrestore(&priv->lock, flags);
2202 return -EAGAIN;
2203 }
2204
2205 priv->status |= STATUS_HCMD_ACTIVE;
2206
2207 if (priv->cmdlog) {
2208 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2209 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2210 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2211 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2212 cmd->len);
2213 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2214 }
2215
2216 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2217 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2218 priv->status);
2219
2220 #ifndef DEBUG_CMD_WEP_KEY
2221 if (cmd->cmd == IPW_CMD_WEP_KEY)
2222 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2223 else
2224 #endif
2225 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2226
2227 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2228 if (rc) {
2229 priv->status &= ~STATUS_HCMD_ACTIVE;
2230 IPW_ERROR("Failed to send %s: Reason %d\n",
2231 get_cmd_string(cmd->cmd), rc);
2232 spin_unlock_irqrestore(&priv->lock, flags);
2233 goto exit;
2234 }
2235 spin_unlock_irqrestore(&priv->lock, flags);
2236
2237 now = jiffies;
2238 end = now + HOST_COMPLETE_TIMEOUT;
2239 again:
2240 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2241 !(priv->status &
2242 STATUS_HCMD_ACTIVE),
2243 end - now);
2244 if (rc < 0) {
2245 now = jiffies;
2246 if (time_before(now, end))
2247 goto again;
2248 rc = 0;
2249 }
2250
2251 if (rc == 0) {
2252 spin_lock_irqsave(&priv->lock, flags);
2253 if (priv->status & STATUS_HCMD_ACTIVE) {
2254 IPW_ERROR("Failed to send %s: Command timed out.\n",
2255 get_cmd_string(cmd->cmd));
2256 priv->status &= ~STATUS_HCMD_ACTIVE;
2257 spin_unlock_irqrestore(&priv->lock, flags);
2258 rc = -EIO;
2259 goto exit;
2260 }
2261 spin_unlock_irqrestore(&priv->lock, flags);
2262 } else
2263 rc = 0;
2264
2265 if (priv->status & STATUS_RF_KILL_HW) {
2266 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2267 get_cmd_string(cmd->cmd));
2268 rc = -EIO;
2269 goto exit;
2270 }
2271
2272 exit:
2273 if (priv->cmdlog) {
2274 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2275 priv->cmdlog_pos %= priv->cmdlog_len;
2276 }
2277 return rc;
2278 }
2279
2280 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2281 {
2282 struct host_cmd cmd = {
2283 .cmd = command,
2284 };
2285
2286 return __ipw_send_cmd(priv, &cmd);
2287 }
2288
2289 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2290 void *data)
2291 {
2292 struct host_cmd cmd = {
2293 .cmd = command,
2294 .len = len,
2295 .param = data,
2296 };
2297
2298 return __ipw_send_cmd(priv, &cmd);
2299 }
2300
2301 static int ipw_send_host_complete(struct ipw_priv *priv)
2302 {
2303 if (!priv) {
2304 IPW_ERROR("Invalid args\n");
2305 return -1;
2306 }
2307
2308 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2309 }
2310
2311 static int ipw_send_system_config(struct ipw_priv *priv)
2312 {
2313 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2314 sizeof(priv->sys_config),
2315 &priv->sys_config);
2316 }
2317
2318 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2319 {
2320 if (!priv || !ssid) {
2321 IPW_ERROR("Invalid args\n");
2322 return -1;
2323 }
2324
2325 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2326 ssid);
2327 }
2328
2329 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2330 {
2331 if (!priv || !mac) {
2332 IPW_ERROR("Invalid args\n");
2333 return -1;
2334 }
2335
2336 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2337 priv->net_dev->name, mac);
2338
2339 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2340 }
2341
2342 static void ipw_adapter_restart(void *adapter)
2343 {
2344 struct ipw_priv *priv = adapter;
2345
2346 if (priv->status & STATUS_RF_KILL_MASK)
2347 return;
2348
2349 ipw_down(priv);
2350
2351 if (priv->assoc_network &&
2352 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2353 ipw_remove_current_network(priv);
2354
2355 if (ipw_up(priv)) {
2356 IPW_ERROR("Failed to up device\n");
2357 return;
2358 }
2359 }
2360
2361 static void ipw_bg_adapter_restart(struct work_struct *work)
2362 {
2363 struct ipw_priv *priv =
2364 container_of(work, struct ipw_priv, adapter_restart);
2365 mutex_lock(&priv->mutex);
2366 ipw_adapter_restart(priv);
2367 mutex_unlock(&priv->mutex);
2368 }
2369
2370 static void ipw_abort_scan(struct ipw_priv *priv);
2371
2372 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2373
2374 static void ipw_scan_check(void *data)
2375 {
2376 struct ipw_priv *priv = data;
2377
2378 if (priv->status & STATUS_SCAN_ABORTING) {
2379 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2380 "adapter after (%dms).\n",
2381 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2382 schedule_work(&priv->adapter_restart);
2383 } else if (priv->status & STATUS_SCANNING) {
2384 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2385 "after (%dms).\n",
2386 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2387 ipw_abort_scan(priv);
2388 schedule_delayed_work(&priv->scan_check, HZ);
2389 }
2390 }
2391
2392 static void ipw_bg_scan_check(struct work_struct *work)
2393 {
2394 struct ipw_priv *priv =
2395 container_of(work, struct ipw_priv, scan_check.work);
2396 mutex_lock(&priv->mutex);
2397 ipw_scan_check(priv);
2398 mutex_unlock(&priv->mutex);
2399 }
2400
2401 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2402 struct ipw_scan_request_ext *request)
2403 {
2404 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2405 sizeof(*request), request);
2406 }
2407
2408 static int ipw_send_scan_abort(struct ipw_priv *priv)
2409 {
2410 if (!priv) {
2411 IPW_ERROR("Invalid args\n");
2412 return -1;
2413 }
2414
2415 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2416 }
2417
2418 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2419 {
2420 struct ipw_sensitivity_calib calib = {
2421 .beacon_rssi_raw = cpu_to_le16(sens),
2422 };
2423
2424 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2425 &calib);
2426 }
2427
2428 static int ipw_send_associate(struct ipw_priv *priv,
2429 struct ipw_associate *associate)
2430 {
2431 if (!priv || !associate) {
2432 IPW_ERROR("Invalid args\n");
2433 return -1;
2434 }
2435
2436 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2437 associate);
2438 }
2439
2440 static int ipw_send_supported_rates(struct ipw_priv *priv,
2441 struct ipw_supported_rates *rates)
2442 {
2443 if (!priv || !rates) {
2444 IPW_ERROR("Invalid args\n");
2445 return -1;
2446 }
2447
2448 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2449 rates);
2450 }
2451
2452 static int ipw_set_random_seed(struct ipw_priv *priv)
2453 {
2454 u32 val;
2455
2456 if (!priv) {
2457 IPW_ERROR("Invalid args\n");
2458 return -1;
2459 }
2460
2461 get_random_bytes(&val, sizeof(val));
2462
2463 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2464 }
2465
2466 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2467 {
2468 __le32 v = cpu_to_le32(phy_off);
2469 if (!priv) {
2470 IPW_ERROR("Invalid args\n");
2471 return -1;
2472 }
2473
2474 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2475 }
2476
2477 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2478 {
2479 if (!priv || !power) {
2480 IPW_ERROR("Invalid args\n");
2481 return -1;
2482 }
2483
2484 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2485 }
2486
2487 static int ipw_set_tx_power(struct ipw_priv *priv)
2488 {
2489 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2490 struct ipw_tx_power tx_power;
2491 s8 max_power;
2492 int i;
2493
2494 memset(&tx_power, 0, sizeof(tx_power));
2495
2496 /* configure device for 'G' band */
2497 tx_power.ieee_mode = IPW_G_MODE;
2498 tx_power.num_channels = geo->bg_channels;
2499 for (i = 0; i < geo->bg_channels; i++) {
2500 max_power = geo->bg[i].max_power;
2501 tx_power.channels_tx_power[i].channel_number =
2502 geo->bg[i].channel;
2503 tx_power.channels_tx_power[i].tx_power = max_power ?
2504 min(max_power, priv->tx_power) : priv->tx_power;
2505 }
2506 if (ipw_send_tx_power(priv, &tx_power))
2507 return -EIO;
2508
2509 /* configure device to also handle 'B' band */
2510 tx_power.ieee_mode = IPW_B_MODE;
2511 if (ipw_send_tx_power(priv, &tx_power))
2512 return -EIO;
2513
2514 /* configure device to also handle 'A' band */
2515 if (priv->ieee->abg_true) {
2516 tx_power.ieee_mode = IPW_A_MODE;
2517 tx_power.num_channels = geo->a_channels;
2518 for (i = 0; i < tx_power.num_channels; i++) {
2519 max_power = geo->a[i].max_power;
2520 tx_power.channels_tx_power[i].channel_number =
2521 geo->a[i].channel;
2522 tx_power.channels_tx_power[i].tx_power = max_power ?
2523 min(max_power, priv->tx_power) : priv->tx_power;
2524 }
2525 if (ipw_send_tx_power(priv, &tx_power))
2526 return -EIO;
2527 }
2528 return 0;
2529 }
2530
2531 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2532 {
2533 struct ipw_rts_threshold rts_threshold = {
2534 .rts_threshold = cpu_to_le16(rts),
2535 };
2536
2537 if (!priv) {
2538 IPW_ERROR("Invalid args\n");
2539 return -1;
2540 }
2541
2542 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2543 sizeof(rts_threshold), &rts_threshold);
2544 }
2545
2546 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2547 {
2548 struct ipw_frag_threshold frag_threshold = {
2549 .frag_threshold = cpu_to_le16(frag),
2550 };
2551
2552 if (!priv) {
2553 IPW_ERROR("Invalid args\n");
2554 return -1;
2555 }
2556
2557 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2558 sizeof(frag_threshold), &frag_threshold);
2559 }
2560
2561 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2562 {
2563 __le32 param;
2564
2565 if (!priv) {
2566 IPW_ERROR("Invalid args\n");
2567 return -1;
2568 }
2569
2570 /* If on battery, set to index 3; if on AC, set to CAM; otherwise use the
2571 * user-supplied level */
2572 switch (mode) {
2573 case IPW_POWER_BATTERY:
2574 param = cpu_to_le32(IPW_POWER_INDEX_3);
2575 break;
2576 case IPW_POWER_AC:
2577 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2578 break;
2579 default:
2580 param = cpu_to_le32(mode);
2581 break;
2582 }
2583
2584 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2585 &param);
2586 }
2587
2588 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2589 {
2590 struct ipw_retry_limit retry_limit = {
2591 .short_retry_limit = slimit,
2592 .long_retry_limit = llimit
2593 };
2594
2595 if (!priv) {
2596 IPW_ERROR("Invalid args\n");
2597 return -1;
2598 }
2599
2600 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2601 &retry_limit);
2602 }
2603
2604 /*
2605 * The IPW device contains a Microwire compatible EEPROM that stores
2606 * various data like the MAC address. Usually the firmware has exclusive
2607 * access to the eeprom, but during device initialization (before the
2608 * device driver has sent the HostComplete command to the firmware) the
2609 * device driver has read access to the EEPROM by way of indirect addressing
2610 * through a couple of memory mapped registers.
2611 *
2612 * The following is a simplified implementation for pulling data out of
2613 * the eeprom, along with some helper functions to find information in
2614 * the per device private data's copy of the eeprom.
2615 *
2616 * NOTE: To better understand how these functions work (i.e. what is a chip
2617 * select and why do we have to keep driving the eeprom clock?), read
2618 * just about any data sheet for a Microwire compatible EEPROM.
2619 */
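/* Sketch of one read transaction as implemented below (assuming a standard
 * 3-wire Microwire part): eeprom_op() clocks out a start bit, the 2-bit READ
 * opcode and an 8-bit address, then eeprom_read_u16() clocks in 16 data bits,
 * MSB first, sampling EEPROM_BIT_DO after each SK pulse. */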
2620
2621 /* write a 32 bit value into the indirect accessor register */
2622 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2623 {
2624 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2625
2626 /* the eeprom requires some time to complete the operation */
2627 udelay(p->eeprom_delay);
2628 }
2629
2630 /* perform a chip select operation */
2631 static void eeprom_cs(struct ipw_priv *priv)
2632 {
2633 eeprom_write_reg(priv, 0);
2634 eeprom_write_reg(priv, EEPROM_BIT_CS);
2635 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636 eeprom_write_reg(priv, EEPROM_BIT_CS);
2637 }
2638
2639 /* release (deassert) the chip select */
2640 static void eeprom_disable_cs(struct ipw_priv *priv)
2641 {
2642 eeprom_write_reg(priv, EEPROM_BIT_CS);
2643 eeprom_write_reg(priv, 0);
2644 eeprom_write_reg(priv, EEPROM_BIT_SK);
2645 }
2646
2647 /* push a single bit down to the eeprom */
2648 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2649 {
2650 int d = (bit ? EEPROM_BIT_DI : 0);
2651 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2652 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2653 }
2654
2655 /* push an opcode followed by an address down to the eeprom */
2656 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2657 {
2658 int i;
2659
2660 eeprom_cs(priv);
2661 eeprom_write_bit(priv, 1);
2662 eeprom_write_bit(priv, op & 2);
2663 eeprom_write_bit(priv, op & 1);
2664 for (i = 7; i >= 0; i--) {
2665 eeprom_write_bit(priv, addr & (1 << i));
2666 }
2667 }
2668
2669 /* pull 16 bits off the eeprom, one bit at a time */
2670 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2671 {
2672 int i;
2673 u16 r = 0;
2674
2675 /* Send READ Opcode */
2676 eeprom_op(priv, EEPROM_CMD_READ, addr);
2677
2678 /* Send dummy bit */
2679 eeprom_write_reg(priv, EEPROM_BIT_CS);
2680
2681 /* Read the byte off the eeprom one bit at a time */
2682 for (i = 0; i < 16; i++) {
2683 u32 data = 0;
2684 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2685 eeprom_write_reg(priv, EEPROM_BIT_CS);
2686 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2687 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2688 }
2689
2690 /* Send another dummy bit */
2691 eeprom_write_reg(priv, 0);
2692 eeprom_disable_cs(priv);
2693
2694 return r;
2695 }
2696
2697 /* helper function for pulling the mac address out of the private */
2698 /* data's copy of the eeprom data */
2699 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700 {
2701 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2702 }
2703
2704 static void ipw_read_eeprom(struct ipw_priv *priv)
2705 {
2706 int i;
2707 __le16 *eeprom = (__le16 *) priv->eeprom;
2708
2709 IPW_DEBUG_TRACE(">>\n");
2710
2711 /* read entire contents of eeprom into private buffer */
2712 for (i = 0; i < 128; i++)
2713 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2714
2715 IPW_DEBUG_TRACE("<<\n");
2716 }
2717
2718 /*
2719 * Either the device driver (i.e. the host) or the firmware can
2720 * load eeprom data into the designated region in SRAM. If neither
2721 * happens then the FW will shutdown with a fatal error.
2722 *
2723 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2724 * bit in the shared SRAM region needs to be non-zero.
2725 */
2726 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2727 {
2728 int i;
2729
2730 IPW_DEBUG_TRACE(">>\n");
2731
2732 /*
2733 If the data looks correct, then copy it to our private
2734 copy. Otherwise let the firmware know to perform the operation
2735 on its own.
2736 */
2737 if (priv->eeprom[EEPROM_VERSION] != 0) {
2738 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2739
2740 /* write the eeprom data to sram */
2741 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2742 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2743
2744 /* Do not load eeprom data on fatal error or suspend */
2745 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2746 } else {
2747 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2748
2749 /* Load eeprom data on fatal error or suspend */
2750 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2751 }
2752
2753 IPW_DEBUG_TRACE("<<\n");
2754 }
2755
2756 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2757 {
2758 count >>= 2;
2759 if (!count)
2760 return;
2761 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2762 while (count--)
2763 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2764 }
2765
2766 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2767 {
2768 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2769 CB_NUMBER_OF_ELEMENTS_SMALL *
2770 sizeof(struct command_block));
2771 }
2772
2773 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2774 { /* start dma engine but no transfers yet */
2775
2776 IPW_DEBUG_FW(">> :\n");
2777
2778 /* Start the dma */
2779 ipw_fw_dma_reset_command_blocks(priv);
2780
2781 /* Write CB base address */
2782 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2783
2784 IPW_DEBUG_FW("<< :\n");
2785 return 0;
2786 }
2787
2788 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2789 {
2790 u32 control = 0;
2791
2792 IPW_DEBUG_FW(">> :\n");
2793
2794 /* set the Stop and Abort bit */
2795 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2796 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2797 priv->sram_desc.last_cb_index = 0;
2798
2799 IPW_DEBUG_FW("<<\n");
2800 }
2801
2802 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2803 struct command_block *cb)
2804 {
2805 u32 address =
2806 IPW_SHARED_SRAM_DMA_CONTROL +
2807 (sizeof(struct command_block) * index);
2808 IPW_DEBUG_FW(">> :\n");
2809
2810 ipw_write_indirect(priv, address, (u8 *) cb,
2811 (int)sizeof(struct command_block));
2812
2813 IPW_DEBUG_FW("<< :\n");
2814 return 0;
2815
2816 }
2817
2818 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2819 {
2820 u32 control = 0;
2821 u32 index = 0;
2822
2823 IPW_DEBUG_FW(">> :\n");
2824
2825 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2826 ipw_fw_dma_write_command_block(priv, index,
2827 &priv->sram_desc.cb_list[index]);
2828
2829 /* Enable the DMA in the CSR register */
2830 ipw_clear_bit(priv, IPW_RESET_REG,
2831 IPW_RESET_REG_MASTER_DISABLED |
2832 IPW_RESET_REG_STOP_MASTER);
2833
2834 /* Set the Start bit. */
2835 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2836 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2837
2838 IPW_DEBUG_FW("<< :\n");
2839 return 0;
2840 }
2841
2842 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2843 {
2844 u32 address;
2845 u32 register_value = 0;
2846 u32 cb_fields_address = 0;
2847
2848 IPW_DEBUG_FW(">> :\n");
2849 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2850 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2851
2852 /* Read the DMA Control register */
2853 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2854 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2855
2856 /* Print the CB values */
2857 cb_fields_address = address;
2858 register_value = ipw_read_reg32(priv, cb_fields_address);
2859 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2860
2861 cb_fields_address += sizeof(u32);
2862 register_value = ipw_read_reg32(priv, cb_fields_address);
2863 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2864
2865 cb_fields_address += sizeof(u32);
2866 register_value = ipw_read_reg32(priv, cb_fields_address);
2867 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2868 register_value);
2869
2870 cb_fields_address += sizeof(u32);
2871 register_value = ipw_read_reg32(priv, cb_fields_address);
2872 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2873
2874 IPW_DEBUG_FW("<< :\n");
2875 }
2876
2877 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2878 {
2879 u32 current_cb_address = 0;
2880 u32 current_cb_index = 0;
2881
2882 IPW_DEBUG_FW(">> :\n");
2883 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2884
2885 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2886 sizeof(struct command_block);
2887
2888 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2889 current_cb_index, current_cb_address);
2890
2891 IPW_DEBUG_FW("<< :\n");
2892 return current_cb_index;
2893
2894 }
2895
2896 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2897 u32 src_address,
2898 u32 dest_address,
2899 u32 length,
2900 int interrupt_enabled, int is_last)
2901 {
2902
2903 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2904 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2905 CB_DEST_SIZE_LONG;
2906 struct command_block *cb;
2907 u32 last_cb_element = 0;
2908
2909 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2910 src_address, dest_address, length);
2911
2912 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2913 return -1;
2914
2915 last_cb_element = priv->sram_desc.last_cb_index;
2916 cb = &priv->sram_desc.cb_list[last_cb_element];
2917 priv->sram_desc.last_cb_index++;
2918
2919 /* Calculate the new CB control word */
2920 if (interrupt_enabled)
2921 control |= CB_INT_ENABLED;
2922
2923 if (is_last)
2924 control |= CB_LAST_VALID;
2925
2926 control |= length;
2927
2928 /* Calculate the CB Element's checksum value */
2929 cb->status = control ^ src_address ^ dest_address;
2930
2931 /* Copy the Source and Destination addresses */
2932 cb->dest_addr = dest_address;
2933 cb->source_addr = src_address;
2934
2935 /* Copy the Control Word last */
2936 cb->control = control;
2937
2938 return 0;
2939 }
2940
2941 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2942 int nr, u32 dest_address, u32 len)
2943 {
2944 int ret, i;
2945 u32 size;
2946
2947 IPW_DEBUG_FW(">>\n");
2948 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2949 nr, dest_address, len);
2950
2951 for (i = 0; i < nr; i++) {
2952 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2953 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2954 dest_address +
2955 i * CB_MAX_LENGTH, size,
2956 0, 0);
2957 if (ret) {
2958 IPW_DEBUG_FW_INFO(": Failed\n");
2959 return -1;
2960 } else
2961 IPW_DEBUG_FW_INFO(": Added new cb\n");
2962 }
2963
2964 IPW_DEBUG_FW("<<\n");
2965 return 0;
2966 }
2967
2968 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2969 {
2970 u32 current_index = 0, previous_index;
2971 u32 watchdog = 0;
2972
2973 IPW_DEBUG_FW(">> :\n");
2974
2975 current_index = ipw_fw_dma_command_block_index(priv);
2976 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2977 (int)priv->sram_desc.last_cb_index);
2978
2979 while (current_index < priv->sram_desc.last_cb_index) {
2980 udelay(50);
2981 previous_index = current_index;
2982 current_index = ipw_fw_dma_command_block_index(priv);
2983
2984 if (previous_index < current_index) {
2985 watchdog = 0;
2986 continue;
2987 }
2988 if (++watchdog > 400) {
2989 IPW_DEBUG_FW_INFO("Timeout\n");
2990 ipw_fw_dma_dump_command_block(priv);
2991 ipw_fw_dma_abort(priv);
2992 return -1;
2993 }
2994 }
2995
2996 ipw_fw_dma_abort(priv);
2997
2998 /* Disable the DMA in the CSR register */
2999 ipw_set_bit(priv, IPW_RESET_REG,
3000 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
3001
3002 IPW_DEBUG_FW("<< dmaWaitSync\n");
3003 return 0;
3004 }
3005
3006 static void ipw_remove_current_network(struct ipw_priv *priv)
3007 {
3008 struct list_head *element, *safe;
3009 struct libipw_network *network = NULL;
3010 unsigned long flags;
3011
3012 spin_lock_irqsave(&priv->ieee->lock, flags);
3013 list_for_each_safe(element, safe, &priv->ieee->network_list) {
3014 network = list_entry(element, struct libipw_network, list);
3015 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
3016 list_del(element);
3017 list_add_tail(&network->list,
3018 &priv->ieee->network_free_list);
3019 }
3020 }
3021 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3022 }
3023
3024 /**
3025 * Check that the card is still alive.
3026 * Reads a debug register from domain 0.
3027 * If the card is present, a pre-defined value should
3028 * be found there.
3029 *
3030 * @param priv
3031 * @return 1 if card is present, 0 otherwise
3032 */
3033 static inline int ipw_alive(struct ipw_priv *priv)
3034 {
3035 return ipw_read32(priv, 0x90) == 0xd55555d5;
3036 }
3037
3038 /* timeout in msec, attempted in 10-msec quanta */
3039 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3040 int timeout)
3041 {
3042 int i = 0;
3043
3044 do {
3045 if ((ipw_read32(priv, addr) & mask) == mask)
3046 return i;
3047 mdelay(10);
3048 i += 10;
3049 } while (i < timeout);
3050
3051 return -ETIME;
3052 }
3053
3054 /* These functions load the firmware and microcode needed for operation of
3055 * the ipw hardware. They assume the buffer holds all the bits for the
3056 * image and that the caller handles the memory allocation and clean up.
3057 */
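/* The overall boot sequence driven by ipw_load() further below is, roughly:
 * stop/reset the NIC, DMA the boot image and wait for FW_INITIALIZATION_DONE,
 * push the ucode through the DINO interface, then DMA the main firmware image
 * and bring the Rx/Tx queues up. */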
3058
3059 static int ipw_stop_master(struct ipw_priv *priv)
3060 {
3061 int rc;
3062
3063 IPW_DEBUG_TRACE(">>\n");
3064 /* stop master. typical delay - 0 */
3065 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3066
3067 /* timeout is in msec, polled in 10-msec quanta */
3068 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3069 IPW_RESET_REG_MASTER_DISABLED, 100);
3070 if (rc < 0) {
3071 IPW_ERROR("wait for stop master failed after 100ms\n");
3072 return -1;
3073 }
3074
3075 IPW_DEBUG_INFO("stop master %dms\n", rc);
3076
3077 return rc;
3078 }
3079
3080 static void ipw_arc_release(struct ipw_priv *priv)
3081 {
3082 IPW_DEBUG_TRACE(">>\n");
3083 mdelay(5);
3084
3085 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3086
3087 /* no one knows the exact timing; for safety, add some delay */
3088 mdelay(5);
3089 }
3090
3091 struct fw_chunk {
3092 __le32 address;
3093 __le32 length;
3094 };
3095
3096 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3097 {
3098 int rc = 0, i, addr;
3099 u8 cr = 0;
3100 __le16 *image;
3101
3102 image = (__le16 *) data;
3103
3104 IPW_DEBUG_TRACE(">>\n");
3105
3106 rc = ipw_stop_master(priv);
3107
3108 if (rc < 0)
3109 return rc;
3110
3111 for (addr = IPW_SHARED_LOWER_BOUND;
3112 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3113 ipw_write32(priv, addr, 0);
3114 }
3115
3116 /* no ucode (yet) */
3117 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3118 /* destroy DMA queues */
3119 /* reset sequence */
3120
3121 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3122 ipw_arc_release(priv);
3123 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3124 mdelay(1);
3125
3126 /* reset PHY */
3127 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3128 mdelay(1);
3129
3130 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3131 mdelay(1);
3132
3133 /* enable ucode store */
3134 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3135 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3136 mdelay(1);
3137
3138 /* write ucode */
3139 /**
3140 * @bug
3141 * Do NOT set the indirect address register once and then
3142 * store data to the indirect data register in a loop.
3143 * It seems very reasonable, but in this case DINO does not
3144 * accept the ucode. It is essential to set the address each time.
3145 */
3146 /* load new ipw uCode */
3147 for (i = 0; i < len / 2; i++)
3148 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3149 le16_to_cpu(image[i]));
3150
3151 /* enable DINO */
3152 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3153 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3154
3155 /* this is where the igx / win driver deviates from the VAP driver. */
3156
3157 /* wait for alive response */
3158 for (i = 0; i < 100; i++) {
3159 /* poll for incoming data */
3160 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3161 if (cr & DINO_RXFIFO_DATA)
3162 break;
3163 mdelay(1);
3164 }
3165
3166 if (cr & DINO_RXFIFO_DATA) {
3167 /* alive_command_response size is NOT a multiple of 4 */
3168 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3169
3170 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3171 response_buffer[i] =
3172 cpu_to_le32(ipw_read_reg32(priv,
3173 IPW_BASEBAND_RX_FIFO_READ));
3174 memcpy(&priv->dino_alive, response_buffer,
3175 sizeof(priv->dino_alive));
3176 if (priv->dino_alive.alive_command == 1
3177 && priv->dino_alive.ucode_valid == 1) {
3178 rc = 0;
3179 IPW_DEBUG_INFO
3180 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3181 "of %02d/%02d/%02d %02d:%02d\n",
3182 priv->dino_alive.software_revision,
3183 priv->dino_alive.software_revision,
3184 priv->dino_alive.device_identifier,
3185 priv->dino_alive.device_identifier,
3186 priv->dino_alive.time_stamp[0],
3187 priv->dino_alive.time_stamp[1],
3188 priv->dino_alive.time_stamp[2],
3189 priv->dino_alive.time_stamp[3],
3190 priv->dino_alive.time_stamp[4]);
3191 } else {
3192 IPW_DEBUG_INFO("Microcode is not alive\n");
3193 rc = -EINVAL;
3194 }
3195 } else {
3196 IPW_DEBUG_INFO("No alive response from DINO\n");
3197 rc = -ETIME;
3198 }
3199
3200 /* disable DINO, otherwise for some reason
3201 the firmware has problems getting the alive resp. */
3202 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3203
3204 return rc;
3205 }
3206
3207 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3208 {
3209 int ret = -1;
3210 int offset = 0;
3211 struct fw_chunk *chunk;
3212 int total_nr = 0;
3213 int i;
3214 struct pci_pool *pool;
3215 void **virts;
3216 dma_addr_t *phys;
3217
3218 IPW_DEBUG_TRACE("<< :\n");
3219
3220 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3221 GFP_KERNEL);
3222 if (!virts)
3223 return -ENOMEM;
3224
3225 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3226 GFP_KERNEL);
3227 if (!phys) {
3228 kfree(virts);
3229 return -ENOMEM;
3230 }
3231 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3232 if (!pool) {
3233 IPW_ERROR("pci_pool_create failed\n");
3234 kfree(phys);
3235 kfree(virts);
3236 return -ENOMEM;
3237 }
3238
3239 /* Start the Dma */
3240 ret = ipw_fw_dma_enable(priv);
3241
3242 /* if the DMA is already ready, this would be a bug. */
3243 BUG_ON(priv->sram_desc.last_cb_index > 0);
3244
3245 do {
3246 u32 chunk_len;
3247 u8 *start;
3248 int size;
3249 int nr = 0;
3250
3251 chunk = (struct fw_chunk *)(data + offset);
3252 offset += sizeof(struct fw_chunk);
3253 chunk_len = le32_to_cpu(chunk->length);
3254 start = data + offset;
3255
3256 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3257 for (i = 0; i < nr; i++) {
3258 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3259 &phys[total_nr]);
3260 if (!virts[total_nr]) {
3261 ret = -ENOMEM;
3262 goto out;
3263 }
3264 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3265 CB_MAX_LENGTH);
3266 memcpy(virts[total_nr], start, size);
3267 start += size;
3268 total_nr++;
3269 /* We don't support fw chunks larger than 64*8K */
3270 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3271 }
3272
3273 /* build DMA packet and queue up for sending */
3274 /* dma to chunk->address, the chunk->length bytes from data +
3275 * offset */
3276 /* Dma loading */
3277 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3278 nr, le32_to_cpu(chunk->address),
3279 chunk_len);
3280 if (ret) {
3281 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3282 goto out;
3283 }
3284
3285 offset += chunk_len;
3286 } while (offset < len);
3287
3288 /* Run the DMA and wait for the answer */
3289 ret = ipw_fw_dma_kick(priv);
3290 if (ret) {
3291 IPW_ERROR("dmaKick Failed\n");
3292 goto out;
3293 }
3294
3295 ret = ipw_fw_dma_wait(priv);
3296 if (ret) {
3297 IPW_ERROR("dmaWaitSync Failed\n");
3298 goto out;
3299 }
3300 out:
3301 for (i = 0; i < total_nr; i++)
3302 pci_pool_free(pool, virts[i], phys[i]);
3303
3304 pci_pool_destroy(pool);
3305 kfree(phys);
3306 kfree(virts);
3307
3308 return ret;
3309 }
3310
3311 /* stop nic */
3312 static int ipw_stop_nic(struct ipw_priv *priv)
3313 {
3314 int rc = 0;
3315
3316 /* stop */
3317 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3318
3319 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3320 IPW_RESET_REG_MASTER_DISABLED, 500);
3321 if (rc < 0) {
3322 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3323 return rc;
3324 }
3325
3326 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3327
3328 return rc;
3329 }
3330
3331 static void ipw_start_nic(struct ipw_priv *priv)
3332 {
3333 IPW_DEBUG_TRACE(">>\n");
3334
3335 /* prvHwStartNic release ARC */
3336 ipw_clear_bit(priv, IPW_RESET_REG,
3337 IPW_RESET_REG_MASTER_DISABLED |
3338 IPW_RESET_REG_STOP_MASTER |
3339 CBD_RESET_REG_PRINCETON_RESET);
3340
3341 /* enable power management */
3342 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3343 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3344
3345 IPW_DEBUG_TRACE("<<\n");
3346 }
3347
3348 static int ipw_init_nic(struct ipw_priv *priv)
3349 {
3350 int rc;
3351
3352 IPW_DEBUG_TRACE(">>\n");
3353 /* reset */
3354 /*prvHwInitNic */
3355 /* set "initialization complete" bit to move adapter to D0 state */
3356 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3357
3358 /* low-level PLL activation */
3359 ipw_write32(priv, IPW_READ_INT_REGISTER,
3360 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3361
3362 /* wait for clock stabilization */
3363 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3364 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3365 if (rc < 0)
3366 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3367
3368 /* assert SW reset */
3369 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3370
3371 udelay(10);
3372
3373 /* set "initialization complete" bit to move adapter to D0 state */
3374 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3375
3376 IPW_DEBUG_TRACE("<<\n");
3377 return 0;
3378 }
3379
3380 /* Call this function from process context; it will sleep in request_firmware.
3381 * Probe is an OK place to call this from.
3382 */
3383 static int ipw_reset_nic(struct ipw_priv *priv)
3384 {
3385 int rc = 0;
3386 unsigned long flags;
3387
3388 IPW_DEBUG_TRACE(">>\n");
3389
3390 rc = ipw_init_nic(priv);
3391
3392 spin_lock_irqsave(&priv->lock, flags);
3393 /* Clear the 'host command active' bit... */
3394 priv->status &= ~STATUS_HCMD_ACTIVE;
3395 wake_up_interruptible(&priv->wait_command_queue);
3396 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3397 wake_up_interruptible(&priv->wait_state);
3398 spin_unlock_irqrestore(&priv->lock, flags);
3399
3400 IPW_DEBUG_TRACE("<<\n");
3401 return rc;
3402 }
3403
3404
3405 struct ipw_fw {
3406 __le32 ver;
3407 __le32 boot_size;
3408 __le32 ucode_size;
3409 __le32 fw_size;
3410 u8 data[0];
3411 };
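/* On-disk layout implied by ipw_get_fw() and ipw_load() below: the header
 * above is followed in data[] by the boot image (boot_size bytes), then the
 * ucode image (ucode_size bytes), then the main firmware image (fw_size
 * bytes). */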
3412
3413 static int ipw_get_fw(struct ipw_priv *priv,
3414 const struct firmware **raw, const char *name)
3415 {
3416 struct ipw_fw *fw;
3417 int rc;
3418
3419 /* ask firmware_class module to get the boot firmware off disk */
3420 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3421 if (rc < 0) {
3422 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3423 return rc;
3424 }
3425
3426 if ((*raw)->size < sizeof(*fw)) {
3427 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3428 return -EINVAL;
3429 }
3430
3431 fw = (void *)(*raw)->data;
3432
3433 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3434 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3435 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3436 name, (*raw)->size);
3437 return -EINVAL;
3438 }
3439
3440 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3441 name,
3442 le32_to_cpu(fw->ver) >> 16,
3443 le32_to_cpu(fw->ver) & 0xff,
3444 (*raw)->size - sizeof(*fw));
3445 return 0;
3446 }
3447
3448 #define IPW_RX_BUF_SIZE (3000)
3449
3450 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3451 struct ipw_rx_queue *rxq)
3452 {
3453 unsigned long flags;
3454 int i;
3455
3456 spin_lock_irqsave(&rxq->lock, flags);
3457
3458 INIT_LIST_HEAD(&rxq->rx_free);
3459 INIT_LIST_HEAD(&rxq->rx_used);
3460
3461 /* Fill the rx_used queue with _all_ of the Rx buffers */
3462 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3463 /* In the reset function, these buffers may have been allocated
3464 * to an SKB, so we need to unmap and free potential storage */
3465 if (rxq->pool[i].skb != NULL) {
3466 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3467 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3468 dev_kfree_skb(rxq->pool[i].skb);
3469 rxq->pool[i].skb = NULL;
3470 }
3471 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3472 }
3473
3474 /* Set us so that we have processed and used all buffers, but have
3475 * not restocked the Rx queue with fresh buffers */
3476 rxq->read = rxq->write = 0;
3477 rxq->free_count = 0;
3478 spin_unlock_irqrestore(&rxq->lock, flags);
3479 }
3480
3481 #ifdef CONFIG_PM
3482 static int fw_loaded = 0;
3483 static const struct firmware *raw = NULL;
3484
3485 static void free_firmware(void)
3486 {
3487 if (fw_loaded) {
3488 release_firmware(raw);
3489 raw = NULL;
3490 fw_loaded = 0;
3491 }
3492 }
3493 #else
3494 #define free_firmware() do {} while (0)
3495 #endif
3496
3497 static int ipw_load(struct ipw_priv *priv)
3498 {
3499 #ifndef CONFIG_PM
3500 const struct firmware *raw = NULL;
3501 #endif
3502 struct ipw_fw *fw;
3503 u8 *boot_img, *ucode_img, *fw_img;
3504 u8 *name = NULL;
3505 int rc = 0, retries = 3;
3506
3507 switch (priv->ieee->iw_mode) {
3508 case IW_MODE_ADHOC:
3509 name = "ipw2200-ibss.fw";
3510 break;
3511 #ifdef CONFIG_IPW2200_MONITOR
3512 case IW_MODE_MONITOR:
3513 name = "ipw2200-sniffer.fw";
3514 break;
3515 #endif
3516 case IW_MODE_INFRA:
3517 name = "ipw2200-bss.fw";
3518 break;
3519 }
3520
3521 if (!name) {
3522 rc = -EINVAL;
3523 goto error;
3524 }
3525
3526 #ifdef CONFIG_PM
3527 if (!fw_loaded) {
3528 #endif
3529 rc = ipw_get_fw(priv, &raw, name);
3530 if (rc < 0)
3531 goto error;
3532 #ifdef CONFIG_PM
3533 }
3534 #endif
3535
3536 fw = (void *)raw->data;
3537 boot_img = &fw->data[0];
3538 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3539 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3540 le32_to_cpu(fw->ucode_size)];
3541
3542 if (rc < 0)
3543 goto error;
3544
3545 if (!priv->rxq)
3546 priv->rxq = ipw_rx_queue_alloc(priv);
3547 else
3548 ipw_rx_queue_reset(priv, priv->rxq);
3549 if (!priv->rxq) {
3550 IPW_ERROR("Unable to initialize Rx queue\n");
3551 rc = -ENOMEM;
3552 goto error;
3553 }
3554
3555 retry:
3556 /* Ensure interrupts are disabled */
3557 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3558 priv->status &= ~STATUS_INT_ENABLED;
3559
3560 /* ack pending interrupts */
3561 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3562
3563 ipw_stop_nic(priv);
3564
3565 rc = ipw_reset_nic(priv);
3566 if (rc < 0) {
3567 IPW_ERROR("Unable to reset NIC\n");
3568 goto error;
3569 }
3570
3571 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3572 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3573
3574 /* DMA the initial boot firmware into the device */
3575 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3576 if (rc < 0) {
3577 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3578 goto error;
3579 }
3580
3581 /* kick start the device */
3582 ipw_start_nic(priv);
3583
3584 /* wait for the device to finish its initial startup sequence */
3585 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3586 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3587 if (rc < 0) {
3588 IPW_ERROR("device failed to boot initial fw image\n");
3589 goto error;
3590 }
3591 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3592
3593 /* ack fw init done interrupt */
3594 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3595
3596 /* DMA the ucode into the device */
3597 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3598 if (rc < 0) {
3599 IPW_ERROR("Unable to load ucode: %d\n", rc);
3600 goto error;
3601 }
3602
3603 /* stop nic */
3604 ipw_stop_nic(priv);
3605
3606 /* DMA bss firmware into the device */
3607 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3608 if (rc < 0) {
3609 IPW_ERROR("Unable to load firmware: %d\n", rc);
3610 goto error;
3611 }
3612 #ifdef CONFIG_PM
3613 fw_loaded = 1;
3614 #endif
3615
3616 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3617
3618 rc = ipw_queue_reset(priv);
3619 if (rc < 0) {
3620 IPW_ERROR("Unable to initialize queues\n");
3621 goto error;
3622 }
3623
3624 /* Ensure interrupts are disabled */
3625 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3626 /* ack pending interrupts */
3627 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3628
3629 /* kick start the device */
3630 ipw_start_nic(priv);
3631
3632 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3633 if (retries > 0) {
3634 IPW_WARNING("Parity error. Retrying init.\n");
3635 retries--;
3636 goto retry;
3637 }
3638
3639 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3640 rc = -EIO;
3641 goto error;
3642 }
3643
3644 /* wait for the device */
3645 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3646 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3647 if (rc < 0) {
3648 IPW_ERROR("device failed to start within 500ms\n");
3649 goto error;
3650 }
3651 IPW_DEBUG_INFO("device response after %dms\n", rc);
3652
3653 /* ack fw init done interrupt */
3654 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3655
3656 /* read eeprom data */
3657 priv->eeprom_delay = 1;
3658 ipw_read_eeprom(priv);
3659 /* initialize the eeprom region of sram */
3660 ipw_eeprom_init_sram(priv);
3661
3662 /* enable interrupts */
3663 ipw_enable_interrupts(priv);
3664
3665 /* Ensure our queue has valid packets */
3666 ipw_rx_queue_replenish(priv);
3667
3668 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3669
3670 /* ack pending interrupts */
3671 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3672
3673 #ifndef CONFIG_PM
3674 release_firmware(raw);
3675 #endif
3676 return 0;
3677
3678 error:
3679 if (priv->rxq) {
3680 ipw_rx_queue_free(priv, priv->rxq);
3681 priv->rxq = NULL;
3682 }
3683 ipw_tx_queue_free(priv);
3684 release_firmware(raw);
3685 #ifdef CONFIG_PM
3686 fw_loaded = 0;
3687 raw = NULL;
3688 #endif
3689
3690 return rc;
3691 }
3692
3693 /**
3694 * DMA services
3695 *
3696 * Theory of operation
3697 *
3698 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3699 * Two empty entries are always kept in the buffer to protect from overflow.
3700 *
3701 * For a Tx queue, there are low mark and high mark limits. If, after queuing
3702 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
3703 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
3704 * > high mark, the Tx queue is resumed.
3705 *
3706 * The IPW operates with six queues, one receive queue in the device's
3707 * sram, one transmit queue for sending commands to the device firmware,
3708 * and four transmit queues for data.
3709 *
3710 * The four transmit queues allow for performing quality of service (qos)
3711 * transmissions as per the 802.11 protocol. Currently Linux does not
3712 * provide a mechanism to the user for utilizing prioritized queues, so
3713 * we only utilize the first data transmit queue (queue1).
3714 */
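/* Worked example for the watermarks computed in ipw_queue_init() below
 * (the queue size is illustrative): with n_bd = 64, low_mark = 64/4 = 16
 * and high_mark = 64/8 = 8, each clamped to a small minimum. */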
3715
3716 /**
3717 * Driver allocates buffers of IPW_RX_BUF_SIZE bytes for Rx (defined above)
3718 */
3719
3720 /**
3721 * ipw_rx_queue_space - Return number of free slots available in queue.
3722 */
3723 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3724 {
3725 int s = q->read - q->write;
3726 if (s <= 0)
3727 s += RX_QUEUE_SIZE;
3728 /* keep some slack so a full queue is not confused with an empty one */
3729 s -= 2;
3730 if (s < 0)
3731 s = 0;
3732 return s;
3733 }
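/* Example of the arithmetic above (values are illustrative): with
 * RX_QUEUE_SIZE = 32, read = 10 and write = 12 give s = -2, then
 * s += 32 -> 30, and the two-slot reserve leaves 28 free slots. */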
3734
3735 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3736 {
3737 int s = q->last_used - q->first_empty;
3738 if (s <= 0)
3739 s += q->n_bd;
3740 s -= 2; /* keep some reserve so empty and full are not confused */
3741 if (s < 0)
3742 s = 0;
3743 return s;
3744 }
3745
3746 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3747 {
3748 return (++index == n_bd) ? 0 : index;
3749 }
3750
3751 /**
3752 * Initialize common DMA queue structure
3753 *
3754 * @param q queue to init
3755 * @param count Number of BD's to allocate. Should be power of 2
3756 * @param read_register Address for 'read' register
3757 * (not offset within BAR, full address)
3758 * @param write_register Address for 'write' register
3759 * (not offset within BAR, full address)
3760 * @param base_register Address for 'base' register
3761 * (not offset within BAR, full address)
3762 * @param size Address for 'size' register
3763 * (not offset within BAR, full address)
3764 */
3765 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3766 int count, u32 read, u32 write, u32 base, u32 size)
3767 {
3768 q->n_bd = count;
3769
3770 q->low_mark = q->n_bd / 4;
3771 if (q->low_mark < 4)
3772 q->low_mark = 4;
3773
3774 q->high_mark = q->n_bd / 8;
3775 if (q->high_mark < 2)
3776 q->high_mark = 2;
3777
3778 q->first_empty = q->last_used = 0;
3779 q->reg_r = read;
3780 q->reg_w = write;
3781
3782 ipw_write32(priv, base, q->dma_addr);
3783 ipw_write32(priv, size, count);
3784 ipw_write32(priv, read, 0);
3785 ipw_write32(priv, write, 0);
3786
3787 _ipw_read32(priv, 0x90);
3788 }
3789
3790 static int ipw_queue_tx_init(struct ipw_priv *priv,
3791 struct clx2_tx_queue *q,
3792 int count, u32 read, u32 write, u32 base, u32 size)
3793 {
3794 struct pci_dev *dev = priv->pci_dev;
3795
3796 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3797 if (!q->txb) {
3798 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3799 return -ENOMEM;
3800 }
3801
3802 q->bd =
3803 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3804 if (!q->bd) {
3805 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3806 sizeof(q->bd[0]) * count);
3807 kfree(q->txb);
3808 q->txb = NULL;
3809 return -ENOMEM;
3810 }
3811
3812 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3813 return 0;
3814 }
3815
3816 /**
3817 * Free one TFD, the one at index [txq->q.last_used].
3818 * Do NOT advance any indexes
3819 *
3820 * @param dev
3821 * @param txq
3822 */
3823 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3824 struct clx2_tx_queue *txq)
3825 {
3826 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3827 struct pci_dev *dev = priv->pci_dev;
3828 int i;
3829
3830 /* classify bd */
3831 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3832 /* nothing to cleanup after for host commands */
3833 return;
3834
3835 /* sanity check */
3836 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3837 IPW_ERROR("Too many chunks: %i\n",
3838 le32_to_cpu(bd->u.data.num_chunks));
3839 /** @todo issue a fatal error, it is quite a serious situation */
3840 return;
3841 }
3842
3843 /* unmap chunks if any */
3844 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3845 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3846 le16_to_cpu(bd->u.data.chunk_len[i]),
3847 PCI_DMA_TODEVICE);
3848 if (txq->txb[txq->q.last_used]) {
3849 libipw_txb_free(txq->txb[txq->q.last_used]);
3850 txq->txb[txq->q.last_used] = NULL;
3851 }
3852 }
3853 }
3854
3855 /**
3856 * Deallocate DMA queue.
3857 *
3858 * Empty queue by removing and destroying all BD's.
3859 * Free all buffers.
3860 *
3861 * @param dev
3862 * @param q
3863 */
3864 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3865 {
3866 struct clx2_queue *q = &txq->q;
3867 struct pci_dev *dev = priv->pci_dev;
3868
3869 if (q->n_bd == 0)
3870 return;
3871
3872 /* first, empty all BD's */
3873 for (; q->first_empty != q->last_used;
3874 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3875 ipw_queue_tx_free_tfd(priv, txq);
3876 }
3877
3878 /* free buffers belonging to queue itself */
3879 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3880 q->dma_addr);
3881 kfree(txq->txb);
3882
3883 /* 0 fill whole structure */
3884 memset(txq, 0, sizeof(*txq));
3885 }
3886
3887 /**
3888 * Destroy all DMA queues and structures
3889 *
3890 * @param priv
3891 */
3892 static void ipw_tx_queue_free(struct ipw_priv *priv)
3893 {
3894 /* Tx CMD queue */
3895 ipw_queue_tx_free(priv, &priv->txq_cmd);
3896
3897 /* Tx queues */
3898 ipw_queue_tx_free(priv, &priv->txq[0]);
3899 ipw_queue_tx_free(priv, &priv->txq[1]);
3900 ipw_queue_tx_free(priv, &priv->txq[2]);
3901 ipw_queue_tx_free(priv, &priv->txq[3]);
3902 }
3903
3904 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3905 {
3906 /* First 3 bytes are manufacturer */
3907 bssid[0] = priv->mac_addr[0];
3908 bssid[1] = priv->mac_addr[1];
3909 bssid[2] = priv->mac_addr[2];
3910
3911 /* Last bytes are random */
3912 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3913
3914 bssid[0] &= 0xfe; /* clear multicast bit */
3915 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3916 }
3917
3918 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3919 {
3920 struct ipw_station_entry entry;
3921 int i;
3922
3923 for (i = 0; i < priv->num_stations; i++) {
3924 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3925 /* Another node is active in network */
3926 priv->missed_adhoc_beacons = 0;
3927 if (!(priv->config & CFG_STATIC_CHANNEL))
3928 /* when other nodes drop out, we drop out */
3929 priv->config &= ~CFG_ADHOC_PERSIST;
3930
3931 return i;
3932 }
3933 }
3934
3935 if (i == MAX_STATIONS)
3936 return IPW_INVALID_STATION;
3937
3938 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3939
3940 entry.reserved = 0;
3941 entry.support_mode = 0;
3942 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3943 memcpy(priv->stations[i], bssid, ETH_ALEN);
3944 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3945 &entry, sizeof(entry));
3946 priv->num_stations++;
3947
3948 return i;
3949 }
3950
3951 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3952 {
3953 int i;
3954
3955 for (i = 0; i < priv->num_stations; i++)
3956 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3957 return i;
3958
3959 return IPW_INVALID_STATION;
3960 }
3961
3962 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3963 {
3964 int err;
3965
3966 if (priv->status & STATUS_ASSOCIATING) {
3967 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3968 schedule_work(&priv->disassociate);
3969 return;
3970 }
3971
3972 if (!(priv->status & STATUS_ASSOCIATED)) {
3973 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3974 return;
3975 }
3976
3977 IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3978 "on channel %d.\n",
3979 priv->assoc_request.bssid,
3980 priv->assoc_request.channel);
3981
3982 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3983 priv->status |= STATUS_DISASSOCIATING;
3984
3985 if (quiet)
3986 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3987 else
3988 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3989
3990 err = ipw_send_associate(priv, &priv->assoc_request);
3991 if (err) {
3992 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3993 "failed.\n");
3994 return;
3995 }
3996
3997 }
3998
3999 static int ipw_disassociate(void *data)
4000 {
4001 struct ipw_priv *priv = data;
4002 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
4003 return 0;
4004 ipw_send_disassociate(data, 0);
4005 netif_carrier_off(priv->net_dev);
4006 return 1;
4007 }
4008
4009 static void ipw_bg_disassociate(struct work_struct *work)
4010 {
4011 struct ipw_priv *priv =
4012 container_of(work, struct ipw_priv, disassociate);
4013 mutex_lock(&priv->mutex);
4014 ipw_disassociate(priv);
4015 mutex_unlock(&priv->mutex);
4016 }
4017
4018 static void ipw_system_config(struct work_struct *work)
4019 {
4020 struct ipw_priv *priv =
4021 container_of(work, struct ipw_priv, system_config);
4022
4023 #ifdef CONFIG_IPW2200_PROMISCUOUS
4024 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4025 priv->sys_config.accept_all_data_frames = 1;
4026 priv->sys_config.accept_non_directed_frames = 1;
4027 priv->sys_config.accept_all_mgmt_bcpr = 1;
4028 priv->sys_config.accept_all_mgmt_frames = 1;
4029 }
4030 #endif
4031
4032 ipw_send_system_config(priv);
4033 }
4034
4035 struct ipw_status_code {
4036 u16 status;
4037 const char *reason;
4038 };
4039
4040 static const struct ipw_status_code ipw_status_codes[] = {
4041 {0x00, "Successful"},
4042 {0x01, "Unspecified failure"},
4043 {0x0A, "Cannot support all requested capabilities in the "
4044 "Capability information field"},
4045 {0x0B, "Reassociation denied due to inability to confirm that "
4046 "association exists"},
4047 {0x0C, "Association denied due to reason outside the scope of this "
4048 "standard"},
4049 {0x0D,
4050 "Responding station does not support the specified authentication "
4051 "algorithm"},
4052 {0x0E,
4053 "Received an Authentication frame with authentication sequence "
4054 "transaction sequence number out of expected sequence"},
4055 {0x0F, "Authentication rejected because of challenge failure"},
4056 {0x10, "Authentication rejected due to timeout waiting for next "
4057 "frame in sequence"},
4058 {0x11, "Association denied because AP is unable to handle additional "
4059 "associated stations"},
4060 {0x12,
4061 "Association denied due to requesting station not supporting all "
4062 "of the datarates in the BSSBasicServiceSet Parameter"},
4063 {0x13,
4064 "Association denied due to requesting station not supporting "
4065 "short preamble operation"},
4066 {0x14,
4067 "Association denied due to requesting station not supporting "
4068 "PBCC encoding"},
4069 {0x15,
4070 "Association denied due to requesting station not supporting "
4071 "channel agility"},
4072 {0x19,
4073 "Association denied due to requesting station not supporting "
4074 "short slot operation"},
4075 {0x1A,
4076 "Association denied due to requesting station not supporting "
4077 "DSSS-OFDM operation"},
4078 {0x28, "Invalid Information Element"},
4079 {0x29, "Group Cipher is not valid"},
4080 {0x2A, "Pairwise Cipher is not valid"},
4081 {0x2B, "AKMP is not valid"},
4082 {0x2C, "Unsupported RSN IE version"},
4083 {0x2D, "Invalid RSN IE Capabilities"},
4084 {0x2E, "Cipher suite is rejected per security policy"},
4085 };
4086
4087 static const char *ipw_get_status_code(u16 status)
4088 {
4089 int i;
4090 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4091 if (ipw_status_codes[i].status == (status & 0xff))
4092 return ipw_status_codes[i].reason;
4093 return "Unknown status value.";
4094 }
4095
4096 static inline void average_init(struct average *avg)
4097 {
4098 memset(avg, 0, sizeof(*avg));
4099 }
4100
4101 #define DEPTH_RSSI 8
4102 #define DEPTH_NOISE 16
4103 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4104 {
4105 return ((depth-1)*prev_avg + val)/depth;
4106 }
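/*
 * Worked example (hypothetical values): with depth = DEPTH_RSSI (8),
 * prev_avg = -60 and a new sample val = -52, the filter above gives
 * ((8 - 1) * -60 + -52) / 8 = (-420 - 52) / 8 = -59, i.e. the average
 * moves 1/8 of the way toward the new sample.
 */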
4107
4108 static void average_add(struct average *avg, s16 val)
4109 {
4110 avg->sum -= avg->entries[avg->pos];
4111 avg->sum += val;
4112 avg->entries[avg->pos++] = val;
4113 if (unlikely(avg->pos == AVG_ENTRIES)) {
4114 avg->init = 1;
4115 avg->pos = 0;
4116 }
4117 }
4118
4119 static s16 average_value(struct average *avg)
4120 {
4121 if (unlikely(!avg->init)) {
4122 if (avg->pos)
4123 return avg->sum / avg->pos;
4124 return 0;
4125 }
4126
4127 return avg->sum / AVG_ENTRIES;
4128 }
4129
4130 static void ipw_reset_stats(struct ipw_priv *priv)
4131 {
4132 u32 len = sizeof(u32);
4133
4134 priv->quality = 0;
4135
4136 average_init(&priv->average_missed_beacons);
4137 priv->exp_avg_rssi = -60;
4138 priv->exp_avg_noise = -85 + 0x100;
4139
4140 priv->last_rate = 0;
4141 priv->last_missed_beacons = 0;
4142 priv->last_rx_packets = 0;
4143 priv->last_tx_packets = 0;
4144 priv->last_tx_failures = 0;
4145
4146 /* Firmware managed, reset only when NIC is restarted, so we have to
4147 * normalize on the current value */
4148 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4149 &priv->last_rx_err, &len);
4150 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4151 &priv->last_tx_failures, &len);
4152
4153 /* Driver managed, reset with each association */
4154 priv->missed_adhoc_beacons = 0;
4155 priv->missed_beacons = 0;
4156 priv->tx_packets = 0;
4157 priv->rx_packets = 0;
4158
4159 }
4160
4161 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4162 {
4163 u32 i = 0x80000000;
4164 u32 mask = priv->rates_mask;
4165 /* If currently associated in B mode, restrict the maximum
4166 * rate match to B rates */
4167 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4168 mask &= LIBIPW_CCK_RATES_MASK;
4169
4170 /* TODO: Verify that the rate is supported by the current rates
4171 * list. */
4172
4173 while (i && !(mask & i))
4174 i >>= 1;
4175 switch (i) {
4176 case LIBIPW_CCK_RATE_1MB_MASK:
4177 return 1000000;
4178 case LIBIPW_CCK_RATE_2MB_MASK:
4179 return 2000000;
4180 case LIBIPW_CCK_RATE_5MB_MASK:
4181 return 5500000;
4182 case LIBIPW_OFDM_RATE_6MB_MASK:
4183 return 6000000;
4184 case LIBIPW_OFDM_RATE_9MB_MASK:
4185 return 9000000;
4186 case LIBIPW_CCK_RATE_11MB_MASK:
4187 return 11000000;
4188 case LIBIPW_OFDM_RATE_12MB_MASK:
4189 return 12000000;
4190 case LIBIPW_OFDM_RATE_18MB_MASK:
4191 return 18000000;
4192 case LIBIPW_OFDM_RATE_24MB_MASK:
4193 return 24000000;
4194 case LIBIPW_OFDM_RATE_36MB_MASK:
4195 return 36000000;
4196 case LIBIPW_OFDM_RATE_48MB_MASK:
4197 return 48000000;
4198 case LIBIPW_OFDM_RATE_54MB_MASK:
4199 return 54000000;
4200 }
4201
4202 if (priv->ieee->mode == IEEE_B)
4203 return 11000000;
4204 else
4205 return 54000000;
4206 }
4207
4208 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4209 {
4210 u32 rate, len = sizeof(rate);
4211 int err;
4212
4213 if (!(priv->status & STATUS_ASSOCIATED))
4214 return 0;
4215
4216 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4217 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4218 &len);
4219 if (err) {
4220 IPW_DEBUG_INFO("failed querying ordinals.\n");
4221 return 0;
4222 }
4223 } else
4224 return ipw_get_max_rate(priv);
4225
4226 switch (rate) {
4227 case IPW_TX_RATE_1MB:
4228 return 1000000;
4229 case IPW_TX_RATE_2MB:
4230 return 2000000;
4231 case IPW_TX_RATE_5MB:
4232 return 5500000;
4233 case IPW_TX_RATE_6MB:
4234 return 6000000;
4235 case IPW_TX_RATE_9MB:
4236 return 9000000;
4237 case IPW_TX_RATE_11MB:
4238 return 11000000;
4239 case IPW_TX_RATE_12MB:
4240 return 12000000;
4241 case IPW_TX_RATE_18MB:
4242 return 18000000;
4243 case IPW_TX_RATE_24MB:
4244 return 24000000;
4245 case IPW_TX_RATE_36MB:
4246 return 36000000;
4247 case IPW_TX_RATE_48MB:
4248 return 48000000;
4249 case IPW_TX_RATE_54MB:
4250 return 54000000;
4251 }
4252
4253 return 0;
4254 }
4255
4256 #define IPW_STATS_INTERVAL (2 * HZ)
4257 static void ipw_gather_stats(struct ipw_priv *priv)
4258 {
4259 u32 rx_err, rx_err_delta, rx_packets_delta;
4260 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4261 u32 missed_beacons_percent, missed_beacons_delta;
4262 u32 quality = 0;
4263 u32 len = sizeof(u32);
4264 s16 rssi;
4265 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4266 rate_quality;
4267 u32 max_rate;
4268
4269 if (!(priv->status & STATUS_ASSOCIATED)) {
4270 priv->quality = 0;
4271 return;
4272 }
4273
4274 /* Update the statistics */
4275 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4276 &priv->missed_beacons, &len);
4277 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4278 priv->last_missed_beacons = priv->missed_beacons;
4279 if (priv->assoc_request.beacon_interval) {
4280 missed_beacons_percent = missed_beacons_delta *
4281 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4282 (IPW_STATS_INTERVAL * 10);
4283 } else {
4284 missed_beacons_percent = 0;
4285 }
4286 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4287
4288 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4289 rx_err_delta = rx_err - priv->last_rx_err;
4290 priv->last_rx_err = rx_err;
4291
4292 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4293 tx_failures_delta = tx_failures - priv->last_tx_failures;
4294 priv->last_tx_failures = tx_failures;
4295
4296 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4297 priv->last_rx_packets = priv->rx_packets;
4298
4299 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4300 priv->last_tx_packets = priv->tx_packets;
4301
4302 /* Calculate quality based on the following:
4303 *
4304 * Missed beacon: 100% = 0, 0% = 70% missed
4305 * Rate: 60% = 1Mbs, 100% = Max
4306 * Rx and Tx errors represent a straight % of total Rx/Tx
4307 * RSSI: 100% = > -50, 0% = < -80
4308 * Rx errors: 100% = 0, 0% = 50% missed
4309 *
4310 * The lowest computed quality is used.
4311 *
4312 */
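/* Worked example (hypothetical numbers, not from the source): with
 * missed_beacons_percent = 10, beacon_quality starts at 90 and is
 * rescaled below to (90 - 5) * 100 / 95 = 89%. With perfect_rssi = -20,
 * worst_rssi = -85 and rssi = -60, the signal_quality expression further
 * down evaluates (in integer math) to 67%; the smallest of the five
 * per-metric values becomes priv->quality. */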
4313 #define BEACON_THRESHOLD 5
4314 beacon_quality = 100 - missed_beacons_percent;
4315 if (beacon_quality < BEACON_THRESHOLD)
4316 beacon_quality = 0;
4317 else
4318 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4319 (100 - BEACON_THRESHOLD);
4320 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4321 beacon_quality, missed_beacons_percent);
4322
4323 priv->last_rate = ipw_get_current_rate(priv);
4324 max_rate = ipw_get_max_rate(priv);
4325 rate_quality = priv->last_rate * 40 / max_rate + 60;
4326 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4327 rate_quality, priv->last_rate / 1000000);
4328
4329 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4330 rx_quality = 100 - (rx_err_delta * 100) /
4331 (rx_packets_delta + rx_err_delta);
4332 else
4333 rx_quality = 100;
4334 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4335 rx_quality, rx_err_delta, rx_packets_delta);
4336
4337 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4338 tx_quality = 100 - (tx_failures_delta * 100) /
4339 (tx_packets_delta + tx_failures_delta);
4340 else
4341 tx_quality = 100;
4342 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4343 tx_quality, tx_failures_delta, tx_packets_delta);
4344
4345 rssi = priv->exp_avg_rssi;
4346 signal_quality =
4347 (100 *
4348 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4349 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4350 (priv->ieee->perfect_rssi - rssi) *
4351 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4352 62 * (priv->ieee->perfect_rssi - rssi))) /
4353 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4354 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4355 if (signal_quality > 100)
4356 signal_quality = 100;
4357 else if (signal_quality < 1)
4358 signal_quality = 0;
4359
4360 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4361 signal_quality, rssi);
4362
4363 quality = min(rx_quality, signal_quality);
4364 quality = min(tx_quality, quality);
4365 quality = min(rate_quality, quality);
4366 quality = min(beacon_quality, quality);
4367 if (quality == beacon_quality)
4368 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4369 quality);
4370 if (quality == rate_quality)
4371 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4372 quality);
4373 if (quality == tx_quality)
4374 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4375 quality);
4376 if (quality == rx_quality)
4377 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4378 quality);
4379 if (quality == signal_quality)
4380 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4381 quality);
4382
4383 priv->quality = quality;
4384
4385 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4386 }
4387
4388 static void ipw_bg_gather_stats(struct work_struct *work)
4389 {
4390 struct ipw_priv *priv =
4391 container_of(work, struct ipw_priv, gather_stats.work);
4392 mutex_lock(&priv->mutex);
4393 ipw_gather_stats(priv);
4394 mutex_unlock(&priv->mutex);
4395 }
4396
4397 /* Missed beacon behavior:
4398 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4399 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4400 * Above disassociate threshold, give up and stop scanning.
4401 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
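/* Illustrative scenario (threshold values are hypothetical; both are
 * tunable at runtime): with roaming enabled, roaming_threshold = 8 and
 * disassociate_threshold = 24, missing 1-8 beacons only logs the count,
 * missing 9-24 sets STATUS_ROAMING and schedules a scan, and anything
 * above 24 aborts scanning and schedules a disassociate. */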
4402 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4403 int missed_count)
4404 {
4405 priv->notif_missed_beacons = missed_count;
4406
4407 if (missed_count > priv->disassociate_threshold &&
4408 priv->status & STATUS_ASSOCIATED) {
4409 /* If associated and we've hit the missed
4410 * beacon threshold, disassociate, turn
4411 * off roaming, and abort any active scans */
4412 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4413 IPW_DL_STATE | IPW_DL_ASSOC,
4414 "Missed beacon: %d - disassociate\n", missed_count);
4415 priv->status &= ~STATUS_ROAMING;
4416 if (priv->status & STATUS_SCANNING) {
4417 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4418 IPW_DL_STATE,
4419 "Aborting scan with missed beacon.\n");
4420 schedule_work(&priv->abort_scan);
4421 }
4422
4423 schedule_work(&priv->disassociate);
4424 return;
4425 }
4426
4427 if (priv->status & STATUS_ROAMING) {
4428 /* If we are currently roaming, then just
4429 * print a debug statement... */
4430 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4431 "Missed beacon: %d - roam in progress\n",
4432 missed_count);
4433 return;
4434 }
4435
4436 if (roaming &&
4437 (missed_count > priv->roaming_threshold &&
4438 missed_count <= priv->disassociate_threshold)) {
4439 /* If we are not already roaming, set the ROAM
4440 * bit in the status and kick off a scan.
4441 * This can happen several times before we reach
4442 * disassociate_threshold. */
4443 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4444 "Missed beacon: %d - initiate "
4445 "roaming\n", missed_count);
4446 if (!(priv->status & STATUS_ROAMING)) {
4447 priv->status |= STATUS_ROAMING;
4448 if (!(priv->status & STATUS_SCANNING))
4449 schedule_delayed_work(&priv->request_scan, 0);
4450 }
4451 return;
4452 }
4453
4454 if (priv->status & STATUS_SCANNING &&
4455 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4456 /* Stop scan to keep fw from getting
4457 * stuck (only if we aren't roaming --
4458 * otherwise we'll never scan more than 2 or 3
4459 * channels..) */
4460 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4461 "Aborting scan with missed beacon.\n");
4462 schedule_work(&priv->abort_scan);
4463 }
4464
4465 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4466 }
4467
4468 static void ipw_scan_event(struct work_struct *work)
4469 {
4470 union iwreq_data wrqu;
4471
4472 struct ipw_priv *priv =
4473 container_of(work, struct ipw_priv, scan_event.work);
4474
4475 wrqu.data.length = 0;
4476 wrqu.data.flags = 0;
4477 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4478 }
4479
4480 static void handle_scan_event(struct ipw_priv *priv)
4481 {
4482 /* Only userspace-requested scan completion events go out immediately */
4483 if (!priv->user_requested_scan) {
4484 schedule_delayed_work(&priv->scan_event,
4485 round_jiffies_relative(msecs_to_jiffies(4000)));
4486 } else {
4487 priv->user_requested_scan = 0;
4488 mod_delayed_work(system_wq, &priv->scan_event, 0);
4489 }
4490 }
4491
4492 /**
4493 * Handle host notification packet.
4494 * Called from the interrupt routine.
4495 */
4496 static void ipw_rx_notification(struct ipw_priv *priv,
4497 struct ipw_rx_notification *notif)
4498 {
4499 DECLARE_SSID_BUF(ssid);
4500 u16 size = le16_to_cpu(notif->size);
4501
4502 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4503
4504 switch (notif->subtype) {
4505 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4506 struct notif_association *assoc = &notif->u.assoc;
4507
4508 switch (assoc->state) {
4509 case CMAS_ASSOCIATED:{
4510 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4511 IPW_DL_ASSOC,
4512 "associated: '%s' %pM\n",
4513 print_ssid(ssid, priv->essid,
4514 priv->essid_len),
4515 priv->bssid);
4516
4517 switch (priv->ieee->iw_mode) {
4518 case IW_MODE_INFRA:
4519 memcpy(priv->ieee->bssid,
4520 priv->bssid, ETH_ALEN);
4521 break;
4522
4523 case IW_MODE_ADHOC:
4524 memcpy(priv->ieee->bssid,
4525 priv->bssid, ETH_ALEN);
4526
4527 /* clear out the station table */
4528 priv->num_stations = 0;
4529
4530 IPW_DEBUG_ASSOC
4531 ("queueing adhoc check\n");
4532 schedule_delayed_work(
4533 &priv->adhoc_check,
4534 le16_to_cpu(priv->
4535 assoc_request.
4536 beacon_interval));
4537 break;
4538 }
4539
4540 priv->status &= ~STATUS_ASSOCIATING;
4541 priv->status |= STATUS_ASSOCIATED;
4542 schedule_work(&priv->system_config);
4543
4544 #ifdef CONFIG_IPW2200_QOS
4545 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4546 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4547 if ((priv->status & STATUS_AUTH) &&
4548 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4549 == IEEE80211_STYPE_ASSOC_RESP)) {
4550 if ((sizeof
4551 (struct
4552 libipw_assoc_response)
4553 <= size)
4554 && (size <= 2314)) {
4555 struct
4556 libipw_rx_stats
4557 stats = {
4558 .len = size - 1,
4559 };
4560
4561 IPW_DEBUG_QOS
4562 ("QoS Associate "
4563 "size %d\n", size);
4564 libipw_rx_mgt(priv->
4565 ieee,
4566 (struct
4567 libipw_hdr_4addr
4568 *)
4569 &notif->u.raw, &stats);
4570 }
4571 }
4572 #endif
4573
4574 schedule_work(&priv->link_up);
4575
4576 break;
4577 }
4578
4579 case CMAS_AUTHENTICATED:{
4580 if (priv->
4581 status & (STATUS_ASSOCIATED |
4582 STATUS_AUTH)) {
4583 struct notif_authenticate *auth
4584 = &notif->u.auth;
4585 IPW_DEBUG(IPW_DL_NOTIF |
4586 IPW_DL_STATE |
4587 IPW_DL_ASSOC,
4588 "deauthenticated: '%s' "
4589 "%pM"
4590 ": (0x%04X) - %s\n",
4591 print_ssid(ssid,
4592 priv->
4593 essid,
4594 priv->
4595 essid_len),
4596 priv->bssid,
4597 le16_to_cpu(auth->status),
4598 ipw_get_status_code
4599 (le16_to_cpu
4600 (auth->status)));
4601
4602 priv->status &=
4603 ~(STATUS_ASSOCIATING |
4604 STATUS_AUTH |
4605 STATUS_ASSOCIATED);
4606
4607 schedule_work(&priv->link_down);
4608 break;
4609 }
4610
4611 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4612 IPW_DL_ASSOC,
4613 "authenticated: '%s' %pM\n",
4614 print_ssid(ssid, priv->essid,
4615 priv->essid_len),
4616 priv->bssid);
4617 break;
4618 }
4619
4620 case CMAS_INIT:{
4621 if (priv->status & STATUS_AUTH) {
4622 struct
4623 libipw_assoc_response
4624 *resp;
4625 resp =
4626 (struct
4627 libipw_assoc_response
4628 *)&notif->u.raw;
4629 IPW_DEBUG(IPW_DL_NOTIF |
4630 IPW_DL_STATE |
4631 IPW_DL_ASSOC,
4632 "association failed (0x%04X): %s\n",
4633 le16_to_cpu(resp->status),
4634 ipw_get_status_code
4635 (le16_to_cpu
4636 (resp->status)));
4637 }
4638
4639 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4640 IPW_DL_ASSOC,
4641 "disassociated: '%s' %pM\n",
4642 print_ssid(ssid, priv->essid,
4643 priv->essid_len),
4644 priv->bssid);
4645
4646 priv->status &=
4647 ~(STATUS_DISASSOCIATING |
4648 STATUS_ASSOCIATING |
4649 STATUS_ASSOCIATED | STATUS_AUTH);
4650 if (priv->assoc_network
4651 && (priv->assoc_network->
4652 capability &
4653 WLAN_CAPABILITY_IBSS))
4654 ipw_remove_current_network
4655 (priv);
4656
4657 schedule_work(&priv->link_down);
4658
4659 break;
4660 }
4661
4662 case CMAS_RX_ASSOC_RESP:
4663 break;
4664
4665 default:
4666 IPW_ERROR("assoc: unknown (%d)\n",
4667 assoc->state);
4668 break;
4669 }
4670
4671 break;
4672 }
4673
4674 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4675 struct notif_authenticate *auth = &notif->u.auth;
4676 switch (auth->state) {
4677 case CMAS_AUTHENTICATED:
4678 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4679 "authenticated: '%s' %pM\n",
4680 print_ssid(ssid, priv->essid,
4681 priv->essid_len),
4682 priv->bssid);
4683 priv->status |= STATUS_AUTH;
4684 break;
4685
4686 case CMAS_INIT:
4687 if (priv->status & STATUS_AUTH) {
4688 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4689 IPW_DL_ASSOC,
4690 "authentication failed (0x%04X): %s\n",
4691 le16_to_cpu(auth->status),
4692 ipw_get_status_code(le16_to_cpu
4693 (auth->
4694 status)));
4695 }
4696 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4697 IPW_DL_ASSOC,
4698 "deauthenticated: '%s' %pM\n",
4699 print_ssid(ssid, priv->essid,
4700 priv->essid_len),
4701 priv->bssid);
4702
4703 priv->status &= ~(STATUS_ASSOCIATING |
4704 STATUS_AUTH |
4705 STATUS_ASSOCIATED);
4706
4707 schedule_work(&priv->link_down);
4708 break;
4709
4710 case CMAS_TX_AUTH_SEQ_1:
4711 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4712 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4713 break;
4714 case CMAS_RX_AUTH_SEQ_2:
4715 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4716 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4717 break;
4718 case CMAS_AUTH_SEQ_1_PASS:
4719 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4720 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4721 break;
4722 case CMAS_AUTH_SEQ_1_FAIL:
4723 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4724 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4725 break;
4726 case CMAS_TX_AUTH_SEQ_3:
4727 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4728 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4729 break;
4730 case CMAS_RX_AUTH_SEQ_4:
4731 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4732 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4733 break;
4734 case CMAS_AUTH_SEQ_2_PASS:
4735 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4736 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4737 break;
4738 case CMAS_AUTH_SEQ_2_FAIL:
4739 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4740 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4741 break;
4742 case CMAS_TX_ASSOC:
4743 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4744 IPW_DL_ASSOC, "TX_ASSOC\n");
4745 break;
4746 case CMAS_RX_ASSOC_RESP:
4747 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4748 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4749
4750 break;
4751 case CMAS_ASSOCIATED:
4752 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4753 IPW_DL_ASSOC, "ASSOCIATED\n");
4754 break;
4755 default:
4756 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4757 auth->state);
4758 break;
4759 }
4760 break;
4761 }
4762
4763 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4764 struct notif_channel_result *x =
4765 &notif->u.channel_result;
4766
4767 if (size == sizeof(*x)) {
4768 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4769 x->channel_num);
4770 } else {
4771 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4772 "(should be %zd)\n",
4773 size, sizeof(*x));
4774 }
4775 break;
4776 }
4777
4778 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4779 struct notif_scan_complete *x = &notif->u.scan_complete;
4780 if (size == sizeof(*x)) {
4781 IPW_DEBUG_SCAN
4782 ("Scan completed: type %d, %d channels, "
4783 "%d status\n", x->scan_type,
4784 x->num_channels, x->status);
4785 } else {
4786 IPW_ERROR("Scan completed of wrong size %d "
4787 "(should be %zd)\n",
4788 size, sizeof(*x));
4789 }
4790
4791 priv->status &=
4792 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4793
4794 wake_up_interruptible(&priv->wait_state);
4795 cancel_delayed_work(&priv->scan_check);
4796
4797 if (priv->status & STATUS_EXIT_PENDING)
4798 break;
4799
4800 priv->ieee->scans++;
4801
4802 #ifdef CONFIG_IPW2200_MONITOR
4803 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4804 priv->status |= STATUS_SCAN_FORCED;
4805 schedule_delayed_work(&priv->request_scan, 0);
4806 break;
4807 }
4808 priv->status &= ~STATUS_SCAN_FORCED;
4809 #endif /* CONFIG_IPW2200_MONITOR */
4810
4811 /* Do queued direct scans first */
4812 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4813 schedule_delayed_work(&priv->request_direct_scan, 0);
4814
4815 if (!(priv->status & (STATUS_ASSOCIATED |
4816 STATUS_ASSOCIATING |
4817 STATUS_ROAMING |
4818 STATUS_DISASSOCIATING)))
4819 schedule_work(&priv->associate);
4820 else if (priv->status & STATUS_ROAMING) {
4821 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4822 /* If a scan completed and we are in roam mode, then
4823 * the scan that completed was the one requested as a
4824 * result of entering roam... so, schedule the
4825 * roam work */
4826 schedule_work(&priv->roam);
4827 else
4828 /* Don't schedule if we aborted the scan */
4829 priv->status &= ~STATUS_ROAMING;
4830 } else if (priv->status & STATUS_SCAN_PENDING)
4831 schedule_delayed_work(&priv->request_scan, 0);
4832 else if (priv->config & CFG_BACKGROUND_SCAN
4833 && priv->status & STATUS_ASSOCIATED)
4834 schedule_delayed_work(&priv->request_scan,
4835 round_jiffies_relative(HZ));
4836
4837 /* Send an empty event to user space.
4838 * We don't send the received data on the event because
4839 * it would require us to do complex transcoding, and
4840 * we want to minimise the work done in the irq handler.
4841 * Use a request to extract the data.
4842 * Also, we generate this event for any scan, regardless
4843 * of how the scan was initiated. User space can just
4844 * sync on periodic scan to get fresh data...
4845 * Jean II */
4846 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4847 handle_scan_event(priv);
4848 break;
4849 }
4850
4851 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4852 struct notif_frag_length *x = &notif->u.frag_len;
4853
4854 if (size == sizeof(*x))
4855 IPW_ERROR("Frag length: %d\n",
4856 le16_to_cpu(x->frag_length));
4857 else
4858 IPW_ERROR("Frag length of wrong size %d "
4859 "(should be %zd)\n",
4860 size, sizeof(*x));
4861 break;
4862 }
4863
4864 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4865 struct notif_link_deterioration *x =
4866 &notif->u.link_deterioration;
4867
4868 if (size == sizeof(*x)) {
4869 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4870 "link deterioration: type %d, cnt %d\n",
4871 x->silence_notification_type,
4872 x->silence_count);
4873 memcpy(&priv->last_link_deterioration, x,
4874 sizeof(*x));
4875 } else {
4876 IPW_ERROR("Link Deterioration of wrong size %d "
4877 "(should be %zd)\n",
4878 size, sizeof(*x));
4879 }
4880 break;
4881 }
4882
4883 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4884 IPW_ERROR("Dino config\n");
4885 if (priv->hcmd
4886 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4887 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4888
4889 break;
4890 }
4891
4892 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4893 struct notif_beacon_state *x = &notif->u.beacon_state;
4894 if (size != sizeof(*x)) {
4895 IPW_ERROR
4896 ("Beacon state of wrong size %d (should "
4897 "be %zd)\n", size, sizeof(*x));
4898 break;
4899 }
4900
4901 if (le32_to_cpu(x->state) ==
4902 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4903 ipw_handle_missed_beacon(priv,
4904 le32_to_cpu(x->
4905 number));
4906
4907 break;
4908 }
4909
4910 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4911 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4912 if (size == sizeof(*x)) {
4913 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4914 "0x%02x station %d\n",
4915 x->key_state, x->security_type,
4916 x->station_index);
4917 break;
4918 }
4919
4920 IPW_ERROR
4921 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4922 size, sizeof(*x));
4923 break;
4924 }
4925
4926 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4927 struct notif_calibration *x = &notif->u.calibration;
4928
4929 if (size == sizeof(*x)) {
4930 memcpy(&priv->calib, x, sizeof(*x));
4931 IPW_DEBUG_INFO("TODO: Calibration\n");
4932 break;
4933 }
4934
4935 IPW_ERROR
4936 ("Calibration of wrong size %d (should be %zd)\n",
4937 size, sizeof(*x));
4938 break;
4939 }
4940
4941 case HOST_NOTIFICATION_NOISE_STATS:{
4942 if (size == sizeof(u32)) {
4943 priv->exp_avg_noise =
4944 exponential_average(priv->exp_avg_noise,
4945 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4946 DEPTH_NOISE);
4947 break;
4948 }
4949
4950 IPW_ERROR
4951 ("Noise stat is wrong size %d (should be %zd)\n",
4952 size, sizeof(u32));
4953 break;
4954 }
4955
4956 default:
4957 IPW_DEBUG_NOTIF("Unknown notification: "
4958 "subtype=%d,flags=0x%2x,size=%d\n",
4959 notif->subtype, notif->flags, size);
4960 }
4961 }
4962
4963 /**
4964 * Destroys all DMA structures and initialises them again
4965 *
4966 * @param priv
4967 * @return error code
4968 */
4969 static int ipw_queue_reset(struct ipw_priv *priv)
4970 {
4971 int rc = 0;
4972 /** @todo customize queue sizes */
4973 int nTx = 64, nTxCmd = 8;
4974 ipw_tx_queue_free(priv);
4975 /* Tx CMD queue */
4976 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4977 IPW_TX_CMD_QUEUE_READ_INDEX,
4978 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4979 IPW_TX_CMD_QUEUE_BD_BASE,
4980 IPW_TX_CMD_QUEUE_BD_SIZE);
4981 if (rc) {
4982 IPW_ERROR("Tx Cmd queue init failed\n");
4983 goto error;
4984 }
4985 /* Tx queue(s) */
4986 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4987 IPW_TX_QUEUE_0_READ_INDEX,
4988 IPW_TX_QUEUE_0_WRITE_INDEX,
4989 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4990 if (rc) {
4991 IPW_ERROR("Tx 0 queue init failed\n");
4992 goto error;
4993 }
4994 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4995 IPW_TX_QUEUE_1_READ_INDEX,
4996 IPW_TX_QUEUE_1_WRITE_INDEX,
4997 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4998 if (rc) {
4999 IPW_ERROR("Tx 1 queue init failed\n");
5000 goto error;
5001 }
5002 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
5003 IPW_TX_QUEUE_2_READ_INDEX,
5004 IPW_TX_QUEUE_2_WRITE_INDEX,
5005 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
5006 if (rc) {
5007 IPW_ERROR("Tx 2 queue init failed\n");
5008 goto error;
5009 }
5010 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5011 IPW_TX_QUEUE_3_READ_INDEX,
5012 IPW_TX_QUEUE_3_WRITE_INDEX,
5013 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5014 if (rc) {
5015 IPW_ERROR("Tx 3 queue init failed\n");
5016 goto error;
5017 }
5018 /* statistics */
5019 priv->rx_bufs_min = 0;
5020 priv->rx_pend_max = 0;
5021 return rc;
5022
5023 error:
5024 ipw_tx_queue_free(priv);
5025 return rc;
5026 }
5027
5028 /**
5029 * Reclaim Tx queue entries that are no longer used by the NIC.
5030 *
5031 * When the FW advances the 'R' index, all entries between the old and
5032 * new 'R' index need to be reclaimed. As a result, some free space
5033 * forms. If there is enough free space (> low mark), wake the Tx queue.
5034 *
5035 * @note Need to protect against garbage in 'R' index
5036 * @param priv
5037 * @param txq
5038 * @param qindex
5039 * @return Number of used entries remaining in the queue
5040 */
5041 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5042 struct clx2_tx_queue *txq, int qindex)
5043 {
5044 u32 hw_tail;
5045 int used;
5046 struct clx2_queue *q = &txq->q;
5047
5048 hw_tail = ipw_read32(priv, q->reg_r);
5049 if (hw_tail >= q->n_bd) {
5050 IPW_ERROR
5051 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5052 hw_tail, q->n_bd);
5053 goto done;
5054 }
5055 for (; q->last_used != hw_tail;
5056 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5057 ipw_queue_tx_free_tfd(priv, txq);
5058 priv->tx_packets++;
5059 }
5060 done:
5061 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5062 (qindex >= 0))
5063 netif_wake_queue(priv->net_dev);
5064 used = q->first_empty - q->last_used;
5065 if (used < 0)
5066 used += q->n_bd;
5067
5068 return used;
5069 }
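/*
 * Worked example (hypothetical indexes): for a 64-entry queue with
 * first_empty = 3 and last_used = 60 after reclaiming, the code above
 * computes used = 3 - 60 = -57, which wraps to -57 + 64 = 7 entries
 * still owned by the firmware.
 */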
5070
5071 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5072 int len, int sync)
5073 {
5074 struct clx2_tx_queue *txq = &priv->txq_cmd;
5075 struct clx2_queue *q = &txq->q;
5076 struct tfd_frame *tfd;
5077
5078 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5079 IPW_ERROR("No space for Tx\n");
5080 return -EBUSY;
5081 }
5082
5083 tfd = &txq->bd[q->first_empty];
5084 txq->txb[q->first_empty] = NULL;
5085
5086 memset(tfd, 0, sizeof(*tfd));
5087 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5088 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5089 priv->hcmd_seq++;
5090 tfd->u.cmd.index = hcmd;
5091 tfd->u.cmd.length = len;
5092 memcpy(tfd->u.cmd.payload, buf, len);
5093 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5094 ipw_write32(priv, q->reg_w, q->first_empty);
5095 _ipw_read32(priv, 0x90);
5096
5097 return 0;
5098 }
5099
5100 /*
5101 * Rx theory of operation
5102 *
5103 * The host allocates 32 DMA target addresses and passes the host address
5104 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5105 * 0 to 31
5106 *
5107 * Rx Queue Indexes
5108 * The host/firmware share two index registers for managing the Rx buffers.
5109 *
5110 * The READ index maps to the first position that the firmware may be writing
5111 * to -- the driver can read up to (but not including) this position and get
5112 * good data.
5113 * The READ index is managed by the firmware once the card is enabled.
5114 *
5115 * The WRITE index maps to the last position the driver has read from -- the
5116 * position preceding WRITE is the last slot in which the firmware can place a packet.
5117 *
5118 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5119 * WRITE = READ.
5120 *
5121 * During initialization the host sets up the READ queue position to the first
5122 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5123 *
5124 * When the firmware places a packet in a buffer it will advance the READ index
5125 * and fire the RX interrupt. The driver can then query the READ index and
5126 * process as many packets as possible, moving the WRITE index forward as it
5127 * resets the Rx queue buffers with new memory.
5128 *
5129 * The management in the driver is as follows:
5130 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5131 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5132 * to replenish the ipw->rxq->rx_free.
5133 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5134 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5135 * 'processed' and 'read' driver indexes as well)
5136 * + A received packet is processed and handed to the kernel network stack,
5137 * detached from the ipw->rxq. The driver 'processed' index is updated.
5138 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5139 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5140 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5141 * were enough free buffers and RX_STALLED is set it is cleared.
5142 *
5143 *
5144 * Driver sequence:
5145 *
5146 * ipw_rx_queue_alloc() Allocates rx_free
5147 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5148 * ipw_rx_queue_restock
5149 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5150 * queue, updates firmware pointers, and updates
5151 * the WRITE index. If insufficient rx_free buffers
5152 * are available, schedules ipw_rx_queue_replenish
5153 *
5154 * -- enable interrupts --
5155 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5156 * READ INDEX, detaching the SKB from the pool.
5157 * Moves the packet buffer from queue to rx_used.
5158 * Calls ipw_rx_queue_restock to refill any empty
5159 * slots.
5160 * ...
5161 *
5162 */
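/*
 * Minimal sketch of the READ/WRITE relationship described above (not
 * part of the driver; the 32-entry size is used only for illustration).
 */
#if 0
#define EXAMPLE_RX_QUEUE_SIZE 32

static int example_rx_queue_empty(u32 read, u32 write)
{
	/* empty: the driver has restocked everything up to READ - 1 */
	return write == (read + EXAMPLE_RX_QUEUE_SIZE - 1) % EXAMPLE_RX_QUEUE_SIZE;
}

static int example_rx_queue_full(u32 read, u32 write)
{
	/* full: the firmware has caught up with the driver */
	return write == read;
}
#endif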
5163
5164 /*
5165 * If there are slots in the RX queue that need to be restocked,
5166 * and we have free pre-allocated buffers, fill the ranks as much
5167 * as we can pulling from rx_free.
5168 *
5169 * This moves the 'write' index forward to catch up with 'processed', and
5170 * also updates the memory address in the firmware to reference the new
5171 * target buffer.
5172 */
5173 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5174 {
5175 struct ipw_rx_queue *rxq = priv->rxq;
5176 struct list_head *element;
5177 struct ipw_rx_mem_buffer *rxb;
5178 unsigned long flags;
5179 int write;
5180
5181 spin_lock_irqsave(&rxq->lock, flags);
5182 write = rxq->write;
5183 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5184 element = rxq->rx_free.next;
5185 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5186 list_del(element);
5187
5188 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5189 rxb->dma_addr);
5190 rxq->queue[rxq->write] = rxb;
5191 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5192 rxq->free_count--;
5193 }
5194 spin_unlock_irqrestore(&rxq->lock, flags);
5195
5196 /* If the pre-allocated buffer pool is dropping low, schedule to
5197 * refill it */
5198 if (rxq->free_count <= RX_LOW_WATERMARK)
5199 schedule_work(&priv->rx_replenish);
5200
5201 /* If we've added more space for the firmware to place data, tell it */
5202 if (write != rxq->write)
5203 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5204 }
5205
5206 /*
5207 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5208 * Also restock the Rx queue via ipw_rx_queue_restock.
5209 *
5210 * This is called as a scheduled work item (except during initialization)
5211 */
5212 static void ipw_rx_queue_replenish(void *data)
5213 {
5214 struct ipw_priv *priv = data;
5215 struct ipw_rx_queue *rxq = priv->rxq;
5216 struct list_head *element;
5217 struct ipw_rx_mem_buffer *rxb;
5218 unsigned long flags;
5219
5220 spin_lock_irqsave(&rxq->lock, flags);
5221 while (!list_empty(&rxq->rx_used)) {
5222 element = rxq->rx_used.next;
5223 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5224 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5225 if (!rxb->skb) {
5226 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5227 priv->net_dev->name);
5228 /* We don't reschedule replenish work here -- we will
5229 * call the restock method and if it still needs
5230 * more buffers it will schedule replenish */
5231 break;
5232 }
5233 list_del(element);
5234
5235 rxb->dma_addr =
5236 pci_map_single(priv->pci_dev, rxb->skb->data,
5237 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5238
5239 list_add_tail(&rxb->list, &rxq->rx_free);
5240 rxq->free_count++;
5241 }
5242 spin_unlock_irqrestore(&rxq->lock, flags);
5243
5244 ipw_rx_queue_restock(priv);
5245 }
5246
5247 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5248 {
5249 struct ipw_priv *priv =
5250 container_of(work, struct ipw_priv, rx_replenish);
5251 mutex_lock(&priv->mutex);
5252 ipw_rx_queue_replenish(priv);
5253 mutex_unlock(&priv->mutex);
5254 }
5255
5256 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5257 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5258 * This free routine walks the list of POOL entries and if SKB is set to
5259 * non-NULL it is unmapped and freed.
5260 */
5261 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5262 {
5263 int i;
5264
5265 if (!rxq)
5266 return;
5267
5268 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5269 if (rxq->pool[i].skb != NULL) {
5270 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5271 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5272 dev_kfree_skb(rxq->pool[i].skb);
5273 }
5274 }
5275
5276 kfree(rxq);
5277 }
5278
5279 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5280 {
5281 struct ipw_rx_queue *rxq;
5282 int i;
5283
5284 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5285 if (unlikely(!rxq)) {
5286 IPW_ERROR("memory allocation failed\n");
5287 return NULL;
5288 }
5289 spin_lock_init(&rxq->lock);
5290 INIT_LIST_HEAD(&rxq->rx_free);
5291 INIT_LIST_HEAD(&rxq->rx_used);
5292
5293 /* Fill the rx_used queue with _all_ of the Rx buffers */
5294 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5295 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5296
5297 /* Set us so that we have processed and used all buffers, but have
5298 * not restocked the Rx queue with fresh buffers */
5299 rxq->read = rxq->write = 0;
5300 rxq->free_count = 0;
5301
5302 return rxq;
5303 }
5304
5305 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5306 {
5307 rate &= ~LIBIPW_BASIC_RATE_MASK;
5308 if (ieee_mode == IEEE_A) {
5309 switch (rate) {
5310 case LIBIPW_OFDM_RATE_6MB:
5311 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5312 1 : 0;
5313 case LIBIPW_OFDM_RATE_9MB:
5314 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5315 1 : 0;
5316 case LIBIPW_OFDM_RATE_12MB:
5317 return priv->
5318 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5319 case LIBIPW_OFDM_RATE_18MB:
5320 return priv->
5321 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5322 case LIBIPW_OFDM_RATE_24MB:
5323 return priv->
5324 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5325 case LIBIPW_OFDM_RATE_36MB:
5326 return priv->
5327 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5328 case LIBIPW_OFDM_RATE_48MB:
5329 return priv->
5330 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5331 case LIBIPW_OFDM_RATE_54MB:
5332 return priv->
5333 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5334 default:
5335 return 0;
5336 }
5337 }
5338
5339 /* B and G mixed */
5340 switch (rate) {
5341 case LIBIPW_CCK_RATE_1MB:
5342 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5343 case LIBIPW_CCK_RATE_2MB:
5344 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5345 case LIBIPW_CCK_RATE_5MB:
5346 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5347 case LIBIPW_CCK_RATE_11MB:
5348 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5349 }
5350
5351 /* If we are limited to B modulations, bail at this point */
5352 if (ieee_mode == IEEE_B)
5353 return 0;
5354
5355 /* G */
5356 switch (rate) {
5357 case LIBIPW_OFDM_RATE_6MB:
5358 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5359 case LIBIPW_OFDM_RATE_9MB:
5360 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5361 case LIBIPW_OFDM_RATE_12MB:
5362 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5363 case LIBIPW_OFDM_RATE_18MB:
5364 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5365 case LIBIPW_OFDM_RATE_24MB:
5366 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5367 case LIBIPW_OFDM_RATE_36MB:
5368 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5369 case LIBIPW_OFDM_RATE_48MB:
5370 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5371 case LIBIPW_OFDM_RATE_54MB:
5372 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5373 }
5374
5375 return 0;
5376 }
5377
5378 static int ipw_compatible_rates(struct ipw_priv *priv,
5379 const struct libipw_network *network,
5380 struct ipw_supported_rates *rates)
5381 {
5382 int num_rates, i;
5383
5384 memset(rates, 0, sizeof(*rates));
5385 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5386 rates->num_rates = 0;
5387 for (i = 0; i < num_rates; i++) {
5388 if (!ipw_is_rate_in_mask(priv, network->mode,
5389 network->rates[i])) {
5390
5391 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5392 IPW_DEBUG_SCAN("Adding masked mandatory "
5393 "rate %02X\n",
5394 network->rates[i]);
5395 rates->supported_rates[rates->num_rates++] =
5396 network->rates[i];
5397 continue;
5398 }
5399
5400 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5401 network->rates[i], priv->rates_mask);
5402 continue;
5403 }
5404
5405 rates->supported_rates[rates->num_rates++] = network->rates[i];
5406 }
5407
5408 num_rates = min(network->rates_ex_len,
5409 (u8) (IPW_MAX_RATES - num_rates));
5410 for (i = 0; i < num_rates; i++) {
5411 if (!ipw_is_rate_in_mask(priv, network->mode,
5412 network->rates_ex[i])) {
5413 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5414 IPW_DEBUG_SCAN("Adding masked mandatory "
5415 "rate %02X\n",
5416 network->rates_ex[i]);
5417 rates->supported_rates[rates->num_rates++] =
5418 network->rates_ex[i];
5419 continue;
5420 }
5421
5422 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5423 network->rates_ex[i], priv->rates_mask);
5424 continue;
5425 }
5426
5427 rates->supported_rates[rates->num_rates++] =
5428 network->rates_ex[i];
5429 }
5430
5431 return 1;
5432 }
5433
5434 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5435 const struct ipw_supported_rates *src)
5436 {
5437 u8 i;
5438 for (i = 0; i < src->num_rates; i++)
5439 dest->supported_rates[i] = src->supported_rates[i];
5440 dest->num_rates = src->num_rates;
5441 }
5442
5443 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5444 * mask should ever be used -- right now all callers to add the scan rates are
5445 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5446 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5447 u8 modulation, u32 rate_mask)
5448 {
5449 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5450 LIBIPW_BASIC_RATE_MASK : 0;
5451
5452 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5453 rates->supported_rates[rates->num_rates++] =
5454 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5455
5456 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5457 rates->supported_rates[rates->num_rates++] =
5458 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5459
5460 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5461 rates->supported_rates[rates->num_rates++] = basic_mask |
5462 LIBIPW_CCK_RATE_5MB;
5463
5464 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5465 rates->supported_rates[rates->num_rates++] = basic_mask |
5466 LIBIPW_CCK_RATE_11MB;
5467 }
5468
5469 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5470 u8 modulation, u32 rate_mask)
5471 {
5472 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5473 LIBIPW_BASIC_RATE_MASK : 0;
5474
5475 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5476 rates->supported_rates[rates->num_rates++] = basic_mask |
5477 LIBIPW_OFDM_RATE_6MB;
5478
5479 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5480 rates->supported_rates[rates->num_rates++] =
5481 LIBIPW_OFDM_RATE_9MB;
5482
5483 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5484 rates->supported_rates[rates->num_rates++] = basic_mask |
5485 LIBIPW_OFDM_RATE_12MB;
5486
5487 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5488 rates->supported_rates[rates->num_rates++] =
5489 LIBIPW_OFDM_RATE_18MB;
5490
5491 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5492 rates->supported_rates[rates->num_rates++] = basic_mask |
5493 LIBIPW_OFDM_RATE_24MB;
5494
5495 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5496 rates->supported_rates[rates->num_rates++] =
5497 LIBIPW_OFDM_RATE_36MB;
5498
5499 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5500 rates->supported_rates[rates->num_rates++] =
5501 LIBIPW_OFDM_RATE_48MB;
5502
5503 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5504 rates->supported_rates[rates->num_rates++] =
5505 LIBIPW_OFDM_RATE_54MB;
5506 }
5507
5508 struct ipw_network_match {
5509 struct libipw_network *network;
5510 struct ipw_supported_rates rates;
5511 };
5512
5513 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5514 struct ipw_network_match *match,
5515 struct libipw_network *network,
5516 int roaming)
5517 {
5518 struct ipw_supported_rates rates;
5519 DECLARE_SSID_BUF(ssid);
5520
5521 /* Verify that this network's capability is compatible with the
5522 * current mode (AdHoc or Infrastructure) */
5523 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5524 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5525 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5526 "capability mismatch.\n",
5527 print_ssid(ssid, network->ssid,
5528 network->ssid_len),
5529 network->bssid);
5530 return 0;
5531 }
5532
5533 if (unlikely(roaming)) {
5534 /* If we are roaming, then check whether this is a valid
5535 * network to try and roam to */
5536 if ((network->ssid_len != match->network->ssid_len) ||
5537 memcmp(network->ssid, match->network->ssid,
5538 network->ssid_len)) {
5539 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5540 "because of non-network ESSID.\n",
5541 print_ssid(ssid, network->ssid,
5542 network->ssid_len),
5543 network->bssid);
5544 return 0;
5545 }
5546 } else {
5547 /* If an ESSID has been configured then compare the broadcast
5548 * ESSID to ours */
5549 if ((priv->config & CFG_STATIC_ESSID) &&
5550 ((network->ssid_len != priv->essid_len) ||
5551 memcmp(network->ssid, priv->essid,
5552 min(network->ssid_len, priv->essid_len)))) {
5553 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5554
5555 strncpy(escaped,
5556 print_ssid(ssid, network->ssid,
5557 network->ssid_len),
5558 sizeof(escaped));
5559 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5560 "because of ESSID mismatch: '%s'.\n",
5561 escaped, network->bssid,
5562 print_ssid(ssid, priv->essid,
5563 priv->essid_len));
5564 return 0;
5565 }
5566 }
5567
5568 /* If the old network rate is better than this one, don't bother
5569 * testing everything else. */
5570
5571 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5572 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5573 "current network.\n",
5574 print_ssid(ssid, match->network->ssid,
5575 match->network->ssid_len));
5576 return 0;
5577 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5578 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5579 "current network.\n",
5580 print_ssid(ssid, match->network->ssid,
5581 match->network->ssid_len));
5582 return 0;
5583 }
5584
5585 /* Now go through and see if the requested network is valid... */
5586 if (priv->ieee->scan_age != 0 &&
5587 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5588 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5589 "because of age: %ums.\n",
5590 print_ssid(ssid, network->ssid,
5591 network->ssid_len),
5592 network->bssid,
5593 jiffies_to_msecs(jiffies -
5594 network->last_scanned));
5595 return 0;
5596 }
5597
5598 if ((priv->config & CFG_STATIC_CHANNEL) &&
5599 (network->channel != priv->channel)) {
5600 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5601 "because of channel mismatch: %d != %d.\n",
5602 print_ssid(ssid, network->ssid,
5603 network->ssid_len),
5604 network->bssid,
5605 network->channel, priv->channel);
5606 return 0;
5607 }
5608
5609 /* Verify privacy compatibility */
5610 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5611 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5612 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5613 "because of privacy mismatch: %s != %s.\n",
5614 print_ssid(ssid, network->ssid,
5615 network->ssid_len),
5616 network->bssid,
5617 priv->
5618 capability & CAP_PRIVACY_ON ? "on" : "off",
5619 network->
5620 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5621 "off");
5622 return 0;
5623 }
5624
5625 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5626 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5627 "because of the same BSSID match: %pM"
5628 ".\n", print_ssid(ssid, network->ssid,
5629 network->ssid_len),
5630 network->bssid,
5631 priv->bssid);
5632 return 0;
5633 }
5634
5635 /* Filter out any incompatible freq / mode combinations */
5636 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5637 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5638 "because of invalid frequency/mode "
5639 "combination.\n",
5640 print_ssid(ssid, network->ssid,
5641 network->ssid_len),
5642 network->bssid);
5643 return 0;
5644 }
5645
5646 /* Ensure that the rates supported by the driver are compatible with
5647 * this AP, including verification of basic rates (mandatory) */
5648 if (!ipw_compatible_rates(priv, network, &rates)) {
5649 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5650 "because configured rate mask excludes "
5651 "AP mandatory rate.\n",
5652 print_ssid(ssid, network->ssid,
5653 network->ssid_len),
5654 network->bssid);
5655 return 0;
5656 }
5657
5658 if (rates.num_rates == 0) {
5659 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5660 "because of no compatible rates.\n",
5661 print_ssid(ssid, network->ssid,
5662 network->ssid_len),
5663 network->bssid);
5664 return 0;
5665 }
5666
5667 /* TODO: Perform any further minimal comparative tests. We do not
5668 * want to put too much policy logic here; intelligent scan selection
5669 * should occur within a generic IEEE 802.11 user space tool. */
5670
5671 /* Set up 'new' AP to this network */
5672 ipw_copy_rates(&match->rates, &rates);
5673 match->network = network;
5674 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5675 print_ssid(ssid, network->ssid, network->ssid_len),
5676 network->bssid);
5677
5678 return 1;
5679 }
5680
5681 static void ipw_merge_adhoc_network(struct work_struct *work)
5682 {
5683 DECLARE_SSID_BUF(ssid);
5684 struct ipw_priv *priv =
5685 container_of(work, struct ipw_priv, merge_networks);
5686 struct libipw_network *network = NULL;
5687 struct ipw_network_match match = {
5688 .network = priv->assoc_network
5689 };
5690
5691 if ((priv->status & STATUS_ASSOCIATED) &&
5692 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5693 /* First pass through ROAM process -- look for a better
5694 * network */
5695 unsigned long flags;
5696
5697 spin_lock_irqsave(&priv->ieee->lock, flags);
5698 list_for_each_entry(network, &priv->ieee->network_list, list) {
5699 if (network != priv->assoc_network)
5700 ipw_find_adhoc_network(priv, &match, network,
5701 1);
5702 }
5703 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5704
5705 if (match.network == priv->assoc_network) {
5706 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5707 "merge to.\n");
5708 return;
5709 }
5710
5711 mutex_lock(&priv->mutex);
5712 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5713 IPW_DEBUG_MERGE("remove network %s\n",
5714 print_ssid(ssid, priv->essid,
5715 priv->essid_len));
5716 ipw_remove_current_network(priv);
5717 }
5718
5719 ipw_disassociate(priv);
5720 priv->assoc_network = match.network;
5721 mutex_unlock(&priv->mutex);
5722 return;
5723 }
5724 }
5725
5726 static int ipw_best_network(struct ipw_priv *priv,
5727 struct ipw_network_match *match,
5728 struct libipw_network *network, int roaming)
5729 {
5730 struct ipw_supported_rates rates;
5731 DECLARE_SSID_BUF(ssid);
5732
5733 /* Verify that this network's capability is compatible with the
5734 * current mode (AdHoc or Infrastructure) */
5735 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5736 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5737 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5738 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5739 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5740 "capability mismatch.\n",
5741 print_ssid(ssid, network->ssid,
5742 network->ssid_len),
5743 network->bssid);
5744 return 0;
5745 }
5746
5747 if (unlikely(roaming)) {
5748 /* If we are roaming, then check that this is a valid
5749 * network to try to roam to */
5750 if ((network->ssid_len != match->network->ssid_len) ||
5751 memcmp(network->ssid, match->network->ssid,
5752 network->ssid_len)) {
5753 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5754 "because of non-network ESSID.\n",
5755 print_ssid(ssid, network->ssid,
5756 network->ssid_len),
5757 network->bssid);
5758 return 0;
5759 }
5760 } else {
5761 /* If an ESSID has been configured then compare the broadcast
5762 * ESSID to ours */
5763 if ((priv->config & CFG_STATIC_ESSID) &&
5764 ((network->ssid_len != priv->essid_len) ||
5765 memcmp(network->ssid, priv->essid,
5766 min(network->ssid_len, priv->essid_len)))) {
5767 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5768 strncpy(escaped,
5769 print_ssid(ssid, network->ssid,
5770 network->ssid_len),
5771 sizeof(escaped));
5772 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5773 "because of ESSID mismatch: '%s'.\n",
5774 escaped, network->bssid,
5775 print_ssid(ssid, priv->essid,
5776 priv->essid_len));
5777 return 0;
5778 }
5779 }
5780
5781 /* If the old network's signal is stronger than this one's, don't
5782 * bother testing everything else. */
5783 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5784 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5785 strncpy(escaped,
5786 print_ssid(ssid, network->ssid, network->ssid_len),
5787 sizeof(escaped));
5788 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5789 "'%s (%pM)' has a stronger signal.\n",
5790 escaped, network->bssid,
5791 print_ssid(ssid, match->network->ssid,
5792 match->network->ssid_len),
5793 match->network->bssid);
5794 return 0;
5795 }
5796
5797 /* If this network has already had an association attempt within the
5798 * last 3 seconds, do not try and associate again... */
5799 if (network->last_associate &&
5800 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5801 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5802 "because of storming (%ums since last "
5803 "assoc attempt).\n",
5804 print_ssid(ssid, network->ssid,
5805 network->ssid_len),
5806 network->bssid,
5807 jiffies_to_msecs(jiffies -
5808 network->last_associate));
5809 return 0;
5810 }
5811
5812 /* Now go through and see if the requested network is valid... */
5813 if (priv->ieee->scan_age != 0 &&
5814 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5815 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5816 "because of age: %ums.\n",
5817 print_ssid(ssid, network->ssid,
5818 network->ssid_len),
5819 network->bssid,
5820 jiffies_to_msecs(jiffies -
5821 network->last_scanned));
5822 return 0;
5823 }
5824
5825 if ((priv->config & CFG_STATIC_CHANNEL) &&
5826 (network->channel != priv->channel)) {
5827 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5828 "because of channel mismatch: %d != %d.\n",
5829 print_ssid(ssid, network->ssid,
5830 network->ssid_len),
5831 network->bssid,
5832 network->channel, priv->channel);
5833 return 0;
5834 }
5835
5836 /* Verify privacy compatibility */
5837 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5838 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5839 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5840 "because of privacy mismatch: %s != %s.\n",
5841 print_ssid(ssid, network->ssid,
5842 network->ssid_len),
5843 network->bssid,
5844 priv->capability & CAP_PRIVACY_ON ? "on" :
5845 "off",
5846 network->capability &
5847 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5848 return 0;
5849 }
5850
5851 if ((priv->config & CFG_STATIC_BSSID) &&
5852 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5853 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5854 "because of BSSID mismatch: %pM.\n",
5855 print_ssid(ssid, network->ssid,
5856 network->ssid_len),
5857 network->bssid, priv->bssid);
5858 return 0;
5859 }
5860
5861 /* Filter out any incompatible freq / mode combinations */
5862 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5863 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5864 "because of invalid frequency/mode "
5865 "combination.\n",
5866 print_ssid(ssid, network->ssid,
5867 network->ssid_len),
5868 network->bssid);
5869 return 0;
5870 }
5871
5872 /* Filter out invalid channel in current GEO */
5873 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5874 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5875 "because of invalid channel in current GEO\n",
5876 print_ssid(ssid, network->ssid,
5877 network->ssid_len),
5878 network->bssid);
5879 return 0;
5880 }
5881
5882 /* Ensure that the rates supported by the driver are compatible with
5883 * this AP, including verification of basic rates (mandatory) */
5884 if (!ipw_compatible_rates(priv, network, &rates)) {
5885 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5886 "because configured rate mask excludes "
5887 "AP mandatory rate.\n",
5888 print_ssid(ssid, network->ssid,
5889 network->ssid_len),
5890 network->bssid);
5891 return 0;
5892 }
5893
5894 if (rates.num_rates == 0) {
5895 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5896 "because of no compatible rates.\n",
5897 print_ssid(ssid, network->ssid,
5898 network->ssid_len),
5899 network->bssid);
5900 return 0;
5901 }
5902
5903 /* TODO: Perform any further minimal comparative tests. We do not
5904 * want to put too much policy logic here; intelligent scan selection
5905 * should occur within a generic IEEE 802.11 user space tool. */
5906
5907 /* Set up 'new' AP to this network */
5908 ipw_copy_rates(&match->rates, &rates);
5909 match->network = network;
5910
5911 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5912 print_ssid(ssid, network->ssid, network->ssid_len),
5913 network->bssid);
5914
5915 return 1;
5916 }
5917
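/*
 * Build the libipw_network template for an IBSS we are about to create
 * (summary; the body below is authoritative): pick a band/channel that is
 * valid for the current GEO, falling back to the first usable channel,
 * generate a BSSID, copy our ESSID and supported rates, and fill in the
 * default beacon/ATIM parameters.
 */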
5918 static void ipw_adhoc_create(struct ipw_priv *priv,
5919 struct libipw_network *network)
5920 {
5921 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5922 int i;
5923
5924 /*
5925 * For the purposes of scanning, we can set our wireless mode
5926 * to trigger scans across combinations of bands, but when it
5927 * comes to creating a new ad-hoc network, we have to tell the FW
5928 * exactly which band to use.
5929 *
5930 * We also have the possibility of an invalid channel for the
5931 * chosen band. Attempting to create a new ad-hoc network
5932 * with an invalid channel for wireless mode will trigger a
5933 * FW fatal error.
5934 *
5935 */
5936 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5937 case LIBIPW_52GHZ_BAND:
5938 network->mode = IEEE_A;
5939 i = libipw_channel_to_index(priv->ieee, priv->channel);
5940 BUG_ON(i == -1);
5941 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5942 IPW_WARNING("Overriding invalid channel\n");
5943 priv->channel = geo->a[0].channel;
5944 }
5945 break;
5946
5947 case LIBIPW_24GHZ_BAND:
5948 if (priv->ieee->mode & IEEE_G)
5949 network->mode = IEEE_G;
5950 else
5951 network->mode = IEEE_B;
5952 i = libipw_channel_to_index(priv->ieee, priv->channel);
5953 BUG_ON(i == -1);
5954 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5955 IPW_WARNING("Overriding invalid channel\n");
5956 priv->channel = geo->bg[0].channel;
5957 }
5958 break;
5959
5960 default:
5961 IPW_WARNING("Overriding invalid channel\n");
5962 if (priv->ieee->mode & IEEE_A) {
5963 network->mode = IEEE_A;
5964 priv->channel = geo->a[0].channel;
5965 } else if (priv->ieee->mode & IEEE_G) {
5966 network->mode = IEEE_G;
5967 priv->channel = geo->bg[0].channel;
5968 } else {
5969 network->mode = IEEE_B;
5970 priv->channel = geo->bg[0].channel;
5971 }
5972 break;
5973 }
5974
5975 network->channel = priv->channel;
5976 priv->config |= CFG_ADHOC_PERSIST;
5977 ipw_create_bssid(priv, network->bssid);
5978 network->ssid_len = priv->essid_len;
5979 memcpy(network->ssid, priv->essid, priv->essid_len);
5980 memset(&network->stats, 0, sizeof(network->stats));
5981 network->capability = WLAN_CAPABILITY_IBSS;
5982 if (!(priv->config & CFG_PREAMBLE_LONG))
5983 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5984 if (priv->capability & CAP_PRIVACY_ON)
5985 network->capability |= WLAN_CAPABILITY_PRIVACY;
5986 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5987 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5988 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5989 memcpy(network->rates_ex,
5990 &priv->rates.supported_rates[network->rates_len],
5991 network->rates_ex_len);
5992 network->last_scanned = 0;
5993 network->flags = 0;
5994 network->last_associate = 0;
5995 network->time_stamp[0] = 0;
5996 network->time_stamp[1] = 0;
5997 network->beacon_interval = 100; /* Default */
5998 network->listen_interval = 10; /* Default */
5999 network->atim_window = 0; /* Default */
6000 network->wpa_ie_len = 0;
6001 network->rsn_ie_len = 0;
6002 }
6003
6004 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
6005 {
6006 struct ipw_tgi_tx_key key;
6007
6008 if (!(priv->ieee->sec.flags & (1 << index)))
6009 return;
6010
6011 key.key_id = index;
6012 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
6013 key.security_type = type;
6014 key.station_index = 0; /* always 0 for BSS */
6015 key.flags = 0;
6016 /* 0 for new key; previous value of counter (after fatal error) */
6017 key.tx_counter[0] = cpu_to_le32(0);
6018 key.tx_counter[1] = cpu_to_le32(0);
6019
6020 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6021 }
6022
6023 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6024 {
6025 struct ipw_wep_key key;
6026 int i;
6027
6028 key.cmd_id = DINO_CMD_WEP_KEY;
6029 key.seq_num = 0;
6030
6031 /* Note: AES keys cannot be set more than once.
6032 * Only set them the first time. */
6033 for (i = 0; i < 4; i++) {
6034 key.key_index = i | type;
6035 if (!(priv->ieee->sec.flags & (1 << i))) {
6036 key.key_size = 0;
6037 continue;
6038 }
6039
6040 key.key_size = priv->ieee->sec.key_sizes[i];
6041 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6042
6043 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6044 }
6045 }
6046
6047 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6048 {
6049 if (priv->ieee->host_encrypt)
6050 return;
6051
6052 switch (level) {
6053 case SEC_LEVEL_3:
6054 priv->sys_config.disable_unicast_decryption = 0;
6055 priv->ieee->host_decrypt = 0;
6056 break;
6057 case SEC_LEVEL_2:
6058 priv->sys_config.disable_unicast_decryption = 1;
6059 priv->ieee->host_decrypt = 1;
6060 break;
6061 case SEC_LEVEL_1:
6062 priv->sys_config.disable_unicast_decryption = 0;
6063 priv->ieee->host_decrypt = 0;
6064 break;
6065 case SEC_LEVEL_0:
6066 priv->sys_config.disable_unicast_decryption = 1;
6067 break;
6068 default:
6069 break;
6070 }
6071 }
6072
6073 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6074 {
6075 if (priv->ieee->host_encrypt)
6076 return;
6077
6078 switch (level) {
6079 case SEC_LEVEL_3:
6080 priv->sys_config.disable_multicast_decryption = 0;
6081 break;
6082 case SEC_LEVEL_2:
6083 priv->sys_config.disable_multicast_decryption = 1;
6084 break;
6085 case SEC_LEVEL_1:
6086 priv->sys_config.disable_multicast_decryption = 0;
6087 break;
6088 case SEC_LEVEL_0:
6089 priv->sys_config.disable_multicast_decryption = 1;
6090 break;
6091 default:
6092 break;
6093 }
6094 }
6095
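/*
 * Push the current keys to the firmware according to the security level
 * (sketch based on the calls below): CCMP loads the active TX key and,
 * unless multicast decryption stays on the host, the group keys; TKIP only
 * loads the TX key (the MIC is still computed on the host); WEP loads all
 * four keys and enables hardware decryption for unicast and multicast.
 */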
6096 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6097 {
6098 switch (priv->ieee->sec.level) {
6099 case SEC_LEVEL_3:
6100 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6101 ipw_send_tgi_tx_key(priv,
6102 DCT_FLAG_EXT_SECURITY_CCM,
6103 priv->ieee->sec.active_key);
6104
6105 if (!priv->ieee->host_mc_decrypt)
6106 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6107 break;
6108 case SEC_LEVEL_2:
6109 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6110 ipw_send_tgi_tx_key(priv,
6111 DCT_FLAG_EXT_SECURITY_TKIP,
6112 priv->ieee->sec.active_key);
6113 break;
6114 case SEC_LEVEL_1:
6115 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6116 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6117 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6118 break;
6119 case SEC_LEVEL_0:
6120 default:
6121 break;
6122 }
6123 }
6124
6125 static void ipw_adhoc_check(void *data)
6126 {
6127 struct ipw_priv *priv = data;
6128
6129 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6130 !(priv->config & CFG_ADHOC_PERSIST)) {
6131 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6132 IPW_DL_STATE | IPW_DL_ASSOC,
6133 "Missed beacon: %d - disassociate\n",
6134 priv->missed_adhoc_beacons);
6135 ipw_remove_current_network(priv);
6136 ipw_disassociate(priv);
6137 return;
6138 }
6139
6140 schedule_delayed_work(&priv->adhoc_check,
6141 le16_to_cpu(priv->assoc_request.beacon_interval));
6142 }
6143
6144 static void ipw_bg_adhoc_check(struct work_struct *work)
6145 {
6146 struct ipw_priv *priv =
6147 container_of(work, struct ipw_priv, adhoc_check.work);
6148 mutex_lock(&priv->mutex);
6149 ipw_adhoc_check(priv);
6150 mutex_unlock(&priv->mutex);
6151 }
6152
6153 static void ipw_debug_config(struct ipw_priv *priv)
6154 {
6155 DECLARE_SSID_BUF(ssid);
6156 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6157 "[CFG 0x%08X]\n", priv->config);
6158 if (priv->config & CFG_STATIC_CHANNEL)
6159 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6160 else
6161 IPW_DEBUG_INFO("Channel unlocked.\n");
6162 if (priv->config & CFG_STATIC_ESSID)
6163 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6164 print_ssid(ssid, priv->essid, priv->essid_len));
6165 else
6166 IPW_DEBUG_INFO("ESSID unlocked.\n");
6167 if (priv->config & CFG_STATIC_BSSID)
6168 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6169 else
6170 IPW_DEBUG_INFO("BSSID unlocked.\n");
6171 if (priv->capability & CAP_PRIVACY_ON)
6172 IPW_DEBUG_INFO("PRIVACY on\n");
6173 else
6174 IPW_DEBUG_INFO("PRIVACY off\n");
6175 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6176 }
6177
6178 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6179 {
6180 /* TODO: Verify that this works... */
6181 struct ipw_fixed_rate fr;
6182 u32 reg;
6183 u16 mask = 0;
6184 u16 new_tx_rates = priv->rates_mask;
6185
6186 /* Identify 'current FW band' and match it with the fixed
6187 * Tx rates */
6188
6189 switch (priv->ieee->freq_band) {
6190 case LIBIPW_52GHZ_BAND: /* A only */
6191 /* IEEE_A */
6192 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6193 /* Invalid fixed rate mask */
6194 IPW_DEBUG_WX
6195 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6196 new_tx_rates = 0;
6197 break;
6198 }
6199
6200 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6201 break;
6202
6203 default: /* 2.4Ghz or Mixed */
6204 /* IEEE_B */
6205 if (mode == IEEE_B) {
6206 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6207 /* Invalid fixed rate mask */
6208 IPW_DEBUG_WX
6209 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6210 new_tx_rates = 0;
6211 }
6212 break;
6213 }
6214
6215 /* IEEE_G */
6216 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6217 LIBIPW_OFDM_RATES_MASK)) {
6218 /* Invalid fixed rate mask */
6219 IPW_DEBUG_WX
6220 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6221 new_tx_rates = 0;
6222 break;
6223 }
6224
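/* The 6/9/12 Mb OFDM bits are remapped one position down here;
 * this appears to match the firmware's layout for the mixed
 * CCK/OFDM fixed-rate word (an assumption based on the shifts
 * below). */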
6225 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6226 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6227 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6228 }
6229
6230 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6231 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6232 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6233 }
6234
6235 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6236 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6237 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6238 }
6239
6240 new_tx_rates |= mask;
6241 break;
6242 }
6243
6244 fr.tx_rates = cpu_to_le16(new_tx_rates);
6245
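/* IPW_MEM_FIXED_OVERRIDE appears to hold the device-memory address
 * of the fixed-rate override word: read that address, then write the
 * new rate mask there through the indirect register interface (as the
 * two calls below do). */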
6246 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6247 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6248 }
6249
6250 static void ipw_abort_scan(struct ipw_priv *priv)
6251 {
6252 int err;
6253
6254 if (priv->status & STATUS_SCAN_ABORTING) {
6255 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6256 return;
6257 }
6258 priv->status |= STATUS_SCAN_ABORTING;
6259
6260 err = ipw_send_scan_abort(priv);
6261 if (err)
6262 IPW_DEBUG_HC("Request to abort scan failed.\n");
6263 }
6264
6265 static void ipw_add_scan_channels(struct ipw_priv *priv,
6266 struct ipw_scan_request_ext *scan,
6267 int scan_type)
6268 {
6269 int channel_index = 0;
6270 const struct libipw_geo *geo;
6271 int i;
6272
6273 geo = libipw_get_geo(priv->ieee);
6274
6275 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6276 int start = channel_index;
6277 for (i = 0; i < geo->a_channels; i++) {
6278 if ((priv->status & STATUS_ASSOCIATED) &&
6279 geo->a[i].channel == priv->channel)
6280 continue;
6281 channel_index++;
6282 scan->channels_list[channel_index] = geo->a[i].channel;
6283 ipw_set_scan_type(scan, channel_index,
6284 geo->a[i].
6285 flags & LIBIPW_CH_PASSIVE_ONLY ?
6286 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6287 scan_type);
6288 }
6289
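/* channels_list is built as per-band groups: a header byte with
 * the band mode in the top two bits and the channel count in the
 * low six, followed by the channel numbers themselves -- which is
 * why the loop above starts writing channels at start + 1. */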
6290 if (start != channel_index) {
6291 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6292 (channel_index - start);
6293 channel_index++;
6294 }
6295 }
6296
6297 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6298 int start = channel_index;
6299 if (priv->config & CFG_SPEED_SCAN) {
6300 int index;
6301 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6302 /* nop out the list */
6303 [0] = 0
6304 };
6305
6306 u8 channel;
6307 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6308 channel =
6309 priv->speed_scan[priv->speed_scan_pos];
6310 if (channel == 0) {
6311 priv->speed_scan_pos = 0;
6312 channel = priv->speed_scan[0];
6313 }
6314 if ((priv->status & STATUS_ASSOCIATED) &&
6315 channel == priv->channel) {
6316 priv->speed_scan_pos++;
6317 continue;
6318 }
6319
6320 /* If this channel has already been
6321 * added to this scan, stop; it will
6322 * be the first channel in the
6323 * next scan.
6324 */
6325 if (channels[channel - 1] != 0)
6326 break;
6327
6328 channels[channel - 1] = 1;
6329 priv->speed_scan_pos++;
6330 channel_index++;
6331 scan->channels_list[channel_index] = channel;
6332 index =
6333 libipw_channel_to_index(priv->ieee, channel);
6334 ipw_set_scan_type(scan, channel_index,
6335 geo->bg[index].
6336 flags &
6337 LIBIPW_CH_PASSIVE_ONLY ?
6338 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6339 : scan_type);
6340 }
6341 } else {
6342 for (i = 0; i < geo->bg_channels; i++) {
6343 if ((priv->status & STATUS_ASSOCIATED) &&
6344 geo->bg[i].channel == priv->channel)
6345 continue;
6346 channel_index++;
6347 scan->channels_list[channel_index] =
6348 geo->bg[i].channel;
6349 ipw_set_scan_type(scan, channel_index,
6350 geo->bg[i].
6351 flags &
6352 LIBIPW_CH_PASSIVE_ONLY ?
6353 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6354 : scan_type);
6355 }
6356 }
6357
6358 if (start != channel_index) {
6359 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6360 (channel_index - start);
6361 }
6362 }
6363 }
6364
6365 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6366 {
6367 /* staying on passive channels longer than the DTIM interval during a
6368 * scan, while associated, causes the firmware to cancel the scan
6369 * without notification. Hence, don't stay on passive channels longer
6370 * than the beacon interval.
6371 */
6372 if (priv->status & STATUS_ASSOCIATED
6373 && priv->assoc_network->beacon_interval > 10)
6374 return priv->assoc_network->beacon_interval - 10;
6375 else
6376 return 120;
6377 }
6378
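/*
 * Build and send a scan request (summary of the logic below): bail out or
 * queue the request if we are still initializing, already scanning,
 * aborting a scan, or RF-killed; passive scans dwell for the full passive
 * time on every channel, while active scans use short dwells and, when
 * roaming or when a static ESSID is configured, direct-scan that SSID on
 * alternate full-scan indices.
 */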
6379 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6380 {
6381 struct ipw_scan_request_ext scan;
6382 int err = 0, scan_type;
6383
6384 if (!(priv->status & STATUS_INIT) ||
6385 (priv->status & STATUS_EXIT_PENDING))
6386 return 0;
6387
6388 mutex_lock(&priv->mutex);
6389
6390 if (direct && (priv->direct_scan_ssid_len == 0)) {
6391 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6392 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6393 goto done;
6394 }
6395
6396 if (priv->status & STATUS_SCANNING) {
6397 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6398 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6399 STATUS_SCAN_PENDING;
6400 goto done;
6401 }
6402
6403 if (!(priv->status & STATUS_SCAN_FORCED) &&
6404 priv->status & STATUS_SCAN_ABORTING) {
6405 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6406 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6407 STATUS_SCAN_PENDING;
6408 goto done;
6409 }
6410
6411 if (priv->status & STATUS_RF_KILL_MASK) {
6412 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6413 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6414 STATUS_SCAN_PENDING;
6415 goto done;
6416 }
6417
6418 memset(&scan, 0, sizeof(scan));
6419 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6420
6421 if (type == IW_SCAN_TYPE_PASSIVE) {
6422 IPW_DEBUG_WX("use passive scanning\n");
6423 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6424 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6425 cpu_to_le16(ipw_passive_dwell_time(priv));
6426 ipw_add_scan_channels(priv, &scan, scan_type);
6427 goto send_request;
6428 }
6429
6430 /* Use active scan by default. */
6431 if (priv->config & CFG_SPEED_SCAN)
6432 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6433 cpu_to_le16(30);
6434 else
6435 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6436 cpu_to_le16(20);
6437
6438 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6439 cpu_to_le16(20);
6440
6441 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6442 cpu_to_le16(ipw_passive_dwell_time(priv));
6443 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6444
6445 #ifdef CONFIG_IPW2200_MONITOR
6446 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6447 u8 channel;
6448 u8 band = 0;
6449
6450 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6451 case LIBIPW_52GHZ_BAND:
6452 band = (u8) (IPW_A_MODE << 6) | 1;
6453 channel = priv->channel;
6454 break;
6455
6456 case LIBIPW_24GHZ_BAND:
6457 band = (u8) (IPW_B_MODE << 6) | 1;
6458 channel = priv->channel;
6459 break;
6460
6461 default:
6462 band = (u8) (IPW_B_MODE << 6) | 1;
6463 channel = 9;
6464 break;
6465 }
6466
6467 scan.channels_list[0] = band;
6468 scan.channels_list[1] = channel;
6469 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6470
6471 /* NOTE: The card will sit on this channel for this time
6472 * period. Scan aborts are timing sensitive and frequently
6473 * result in firmware restarts. As such, it is best to
6474 * set a small dwell_time here and just keep re-issuing
6475 * scans. Otherwise fast channel hopping will not actually
6476 * hop channels.
6477 *
6478 * TODO: Move SPEED SCAN support to all modes and bands */
6479 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6480 cpu_to_le16(2000);
6481 } else {
6482 #endif /* CONFIG_IPW2200_MONITOR */
6483 /* Honor direct scans first, otherwise if we are roaming make
6484 * this a direct scan for the current network. Finally,
6485 * ensure that every other scan is a fast channel hop scan */
6486 if (direct) {
6487 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6488 priv->direct_scan_ssid_len);
6489 if (err) {
6490 IPW_DEBUG_HC("Attempt to send SSID command "
6491 "failed\n");
6492 goto done;
6493 }
6494
6495 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6496 } else if ((priv->status & STATUS_ROAMING)
6497 || (!(priv->status & STATUS_ASSOCIATED)
6498 && (priv->config & CFG_STATIC_ESSID)
6499 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6500 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6501 if (err) {
6502 IPW_DEBUG_HC("Attempt to send SSID command "
6503 "failed.\n");
6504 goto done;
6505 }
6506
6507 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6508 } else
6509 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6510
6511 ipw_add_scan_channels(priv, &scan, scan_type);
6512 #ifdef CONFIG_IPW2200_MONITOR
6513 }
6514 #endif
6515
6516 send_request:
6517 err = ipw_send_scan_request_ext(priv, &scan);
6518 if (err) {
6519 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6520 goto done;
6521 }
6522
6523 priv->status |= STATUS_SCANNING;
6524 if (direct) {
6525 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6526 priv->direct_scan_ssid_len = 0;
6527 } else
6528 priv->status &= ~STATUS_SCAN_PENDING;
6529
6530 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6531 done:
6532 mutex_unlock(&priv->mutex);
6533 return err;
6534 }
6535
6536 static void ipw_request_passive_scan(struct work_struct *work)
6537 {
6538 struct ipw_priv *priv =
6539 container_of(work, struct ipw_priv, request_passive_scan.work);
6540 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6541 }
6542
6543 static void ipw_request_scan(struct work_struct *work)
6544 {
6545 struct ipw_priv *priv =
6546 container_of(work, struct ipw_priv, request_scan.work);
6547 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6548 }
6549
6550 static void ipw_request_direct_scan(struct work_struct *work)
6551 {
6552 struct ipw_priv *priv =
6553 container_of(work, struct ipw_priv, request_direct_scan.work);
6554 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6555 }
6556
6557 static void ipw_bg_abort_scan(struct work_struct *work)
6558 {
6559 struct ipw_priv *priv =
6560 container_of(work, struct ipw_priv, abort_scan);
6561 mutex_lock(&priv->mutex);
6562 ipw_abort_scan(priv);
6563 mutex_unlock(&priv->mutex);
6564 }
6565
6566 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6567 {
6568 /* This is called when wpa_supplicant loads and closes the driver
6569 * interface. */
6570 priv->ieee->wpa_enabled = value;
6571 return 0;
6572 }
6573
6574 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6575 {
6576 struct libipw_device *ieee = priv->ieee;
6577 struct libipw_security sec = {
6578 .flags = SEC_AUTH_MODE,
6579 };
6580 int ret = 0;
6581
6582 if (value & IW_AUTH_ALG_SHARED_KEY) {
6583 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6584 ieee->open_wep = 0;
6585 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6586 sec.auth_mode = WLAN_AUTH_OPEN;
6587 ieee->open_wep = 1;
6588 } else if (value & IW_AUTH_ALG_LEAP) {
6589 sec.auth_mode = WLAN_AUTH_LEAP;
6590 ieee->open_wep = 1;
6591 } else
6592 return -EINVAL;
6593
6594 if (ieee->set_security)
6595 ieee->set_security(ieee->dev, &sec);
6596 else
6597 ret = -EOPNOTSUPP;
6598
6599 return ret;
6600 }
6601
6602 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6603 int wpa_ie_len)
6604 {
6605 /* make sure WPA is enabled */
6606 ipw_wpa_enable(priv, 1);
6607 }
6608
6609 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6610 char *capabilities, int length)
6611 {
6612 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6613
6614 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6615 capabilities);
6616 }
6617
6618 /*
6619 * WE-18 support
6620 */
6621
6622 /* SIOCSIWGENIE */
6623 static int ipw_wx_set_genie(struct net_device *dev,
6624 struct iw_request_info *info,
6625 union iwreq_data *wrqu, char *extra)
6626 {
6627 struct ipw_priv *priv = libipw_priv(dev);
6628 struct libipw_device *ieee = priv->ieee;
6629 u8 *buf;
6630 int err = 0;
6631
6632 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6633 (wrqu->data.length && extra == NULL))
6634 return -EINVAL;
6635
6636 if (wrqu->data.length) {
6637 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6638 if (buf == NULL) {
6639 err = -ENOMEM;
6640 goto out;
6641 }
6642
6643 kfree(ieee->wpa_ie);
6644 ieee->wpa_ie = buf;
6645 ieee->wpa_ie_len = wrqu->data.length;
6646 } else {
6647 kfree(ieee->wpa_ie);
6648 ieee->wpa_ie = NULL;
6649 ieee->wpa_ie_len = 0;
6650 }
6651
6652 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6653 out:
6654 return err;
6655 }
6656
6657 /* SIOCGIWGENIE */
6658 static int ipw_wx_get_genie(struct net_device *dev,
6659 struct iw_request_info *info,
6660 union iwreq_data *wrqu, char *extra)
6661 {
6662 struct ipw_priv *priv = libipw_priv(dev);
6663 struct libipw_device *ieee = priv->ieee;
6664 int err = 0;
6665
6666 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6667 wrqu->data.length = 0;
6668 goto out;
6669 }
6670
6671 if (wrqu->data.length < ieee->wpa_ie_len) {
6672 err = -E2BIG;
6673 goto out;
6674 }
6675
6676 wrqu->data.length = ieee->wpa_ie_len;
6677 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6678
6679 out:
6680 return err;
6681 }
6682
6683 static int wext_cipher2level(int cipher)
6684 {
6685 switch (cipher) {
6686 case IW_AUTH_CIPHER_NONE:
6687 return SEC_LEVEL_0;
6688 case IW_AUTH_CIPHER_WEP40:
6689 case IW_AUTH_CIPHER_WEP104:
6690 return SEC_LEVEL_1;
6691 case IW_AUTH_CIPHER_TKIP:
6692 return SEC_LEVEL_2;
6693 case IW_AUTH_CIPHER_CCMP:
6694 return SEC_LEVEL_3;
6695 default:
6696 return -1;
6697 }
6698 }
6699
6700 /* SIOCSIWAUTH */
6701 static int ipw_wx_set_auth(struct net_device *dev,
6702 struct iw_request_info *info,
6703 union iwreq_data *wrqu, char *extra)
6704 {
6705 struct ipw_priv *priv = libipw_priv(dev);
6706 struct libipw_device *ieee = priv->ieee;
6707 struct iw_param *param = &wrqu->param;
6708 struct lib80211_crypt_data *crypt;
6709 unsigned long flags;
6710 int ret = 0;
6711
6712 switch (param->flags & IW_AUTH_INDEX) {
6713 case IW_AUTH_WPA_VERSION:
6714 break;
6715 case IW_AUTH_CIPHER_PAIRWISE:
6716 ipw_set_hw_decrypt_unicast(priv,
6717 wext_cipher2level(param->value));
6718 break;
6719 case IW_AUTH_CIPHER_GROUP:
6720 ipw_set_hw_decrypt_multicast(priv,
6721 wext_cipher2level(param->value));
6722 break;
6723 case IW_AUTH_KEY_MGMT:
6724 /*
6725 * ipw2200 does not use these parameters
6726 */
6727 break;
6728
6729 case IW_AUTH_TKIP_COUNTERMEASURES:
6730 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6731 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6732 break;
6733
6734 flags = crypt->ops->get_flags(crypt->priv);
6735
6736 if (param->value)
6737 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6738 else
6739 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6740
6741 crypt->ops->set_flags(flags, crypt->priv);
6742
6743 break;
6744
6745 case IW_AUTH_DROP_UNENCRYPTED:{
6746 /* HACK:
6747 *
6748 * wpa_supplicant calls set_wpa_enabled when the driver
6749 * is loaded and unloaded, regardless of whether WPA is
6750 * being used. No other calls are made before association
6751 * that can be used to determine whether encryption will
6752 * be used. If encryption is not being used,
6753 * drop_unencrypted is set to false, else true -- we can
6754 * use this to determine whether the CAP_PRIVACY_ON bit
6755 * should be set.
6756 */
6757 struct libipw_security sec = {
6758 .flags = SEC_ENABLED,
6759 .enabled = param->value,
6760 };
6761 priv->ieee->drop_unencrypted = param->value;
6762 /* We only change SEC_LEVEL for open mode. Others
6763 * are set by ipw_wpa_set_encryption.
6764 */
6765 if (!param->value) {
6766 sec.flags |= SEC_LEVEL;
6767 sec.level = SEC_LEVEL_0;
6768 } else {
6769 sec.flags |= SEC_LEVEL;
6770 sec.level = SEC_LEVEL_1;
6771 }
6772 if (priv->ieee->set_security)
6773 priv->ieee->set_security(priv->ieee->dev, &sec);
6774 break;
6775 }
6776
6777 case IW_AUTH_80211_AUTH_ALG:
6778 ret = ipw_wpa_set_auth_algs(priv, param->value);
6779 break;
6780
6781 case IW_AUTH_WPA_ENABLED:
6782 ret = ipw_wpa_enable(priv, param->value);
6783 ipw_disassociate(priv);
6784 break;
6785
6786 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6787 ieee->ieee802_1x = param->value;
6788 break;
6789
6790 case IW_AUTH_PRIVACY_INVOKED:
6791 ieee->privacy_invoked = param->value;
6792 break;
6793
6794 default:
6795 return -EOPNOTSUPP;
6796 }
6797 return ret;
6798 }
6799
6800 /* SIOCGIWAUTH */
6801 static int ipw_wx_get_auth(struct net_device *dev,
6802 struct iw_request_info *info,
6803 union iwreq_data *wrqu, char *extra)
6804 {
6805 struct ipw_priv *priv = libipw_priv(dev);
6806 struct libipw_device *ieee = priv->ieee;
6807 struct lib80211_crypt_data *crypt;
6808 struct iw_param *param = &wrqu->param;
6809
6810 switch (param->flags & IW_AUTH_INDEX) {
6811 case IW_AUTH_WPA_VERSION:
6812 case IW_AUTH_CIPHER_PAIRWISE:
6813 case IW_AUTH_CIPHER_GROUP:
6814 case IW_AUTH_KEY_MGMT:
6815 /*
6816 * wpa_supplicant will control these internally
6817 */
6818 return -EOPNOTSUPP;
6819
6820 case IW_AUTH_TKIP_COUNTERMEASURES:
6821 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6822 if (!crypt || !crypt->ops->get_flags)
6823 break;
6824
6825 param->value = (crypt->ops->get_flags(crypt->priv) &
6826 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6827
6828 break;
6829
6830 case IW_AUTH_DROP_UNENCRYPTED:
6831 param->value = ieee->drop_unencrypted;
6832 break;
6833
6834 case IW_AUTH_80211_AUTH_ALG:
6835 param->value = ieee->sec.auth_mode;
6836 break;
6837
6838 case IW_AUTH_WPA_ENABLED:
6839 param->value = ieee->wpa_enabled;
6840 break;
6841
6842 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6843 param->value = ieee->ieee802_1x;
6844 break;
6845
6846 case IW_AUTH_ROAMING_CONTROL:
6847 case IW_AUTH_PRIVACY_INVOKED:
6848 param->value = ieee->privacy_invoked;
6849 break;
6850
6851 default:
6852 return -EOPNOTSUPP;
6853 }
6854 return 0;
6855 }
6856
6857 /* SIOCSIWENCODEEXT */
6858 static int ipw_wx_set_encodeext(struct net_device *dev,
6859 struct iw_request_info *info,
6860 union iwreq_data *wrqu, char *extra)
6861 {
6862 struct ipw_priv *priv = libipw_priv(dev);
6863 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6864
6865 if (hwcrypto) {
6866 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6867 /* IPW HW can't build TKIP MIC,
6868 host decryption still needed */
6869 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6870 priv->ieee->host_mc_decrypt = 1;
6871 else {
6872 priv->ieee->host_encrypt = 0;
6873 priv->ieee->host_encrypt_msdu = 1;
6874 priv->ieee->host_decrypt = 1;
6875 }
6876 } else {
6877 priv->ieee->host_encrypt = 0;
6878 priv->ieee->host_encrypt_msdu = 0;
6879 priv->ieee->host_decrypt = 0;
6880 priv->ieee->host_mc_decrypt = 0;
6881 }
6882 }
6883
6884 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6885 }
6886
6887 /* SIOCGIWENCODEEXT */
6888 static int ipw_wx_get_encodeext(struct net_device *dev,
6889 struct iw_request_info *info,
6890 union iwreq_data *wrqu, char *extra)
6891 {
6892 struct ipw_priv *priv = libipw_priv(dev);
6893 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6894 }
6895
6896 /* SIOCSIWMLME */
6897 static int ipw_wx_set_mlme(struct net_device *dev,
6898 struct iw_request_info *info,
6899 union iwreq_data *wrqu, char *extra)
6900 {
6901 struct ipw_priv *priv = libipw_priv(dev);
6902 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6903 __le16 reason;
6904
6905 reason = cpu_to_le16(mlme->reason_code);
6906
6907 switch (mlme->cmd) {
6908 case IW_MLME_DEAUTH:
6909 /* silently ignore */
6910 break;
6911
6912 case IW_MLME_DISASSOC:
6913 ipw_disassociate(priv);
6914 break;
6915
6916 default:
6917 return -EOPNOTSUPP;
6918 }
6919 return 0;
6920 }
6921
6922 #ifdef CONFIG_IPW2200_QOS
6923
6924 /* QoS */
6925 /*
6926 * get the modulation type of the current network or
6927 * the card's current mode
6928 */
6929 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6930 {
6931 u8 mode = 0;
6932
6933 if (priv->status & STATUS_ASSOCIATED) {
6934 unsigned long flags;
6935
6936 spin_lock_irqsave(&priv->ieee->lock, flags);
6937 mode = priv->assoc_network->mode;
6938 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6939 } else {
6940 mode = priv->ieee->mode;
6941 }
6942 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6943 return mode;
6944 }
6945
6946 /*
6947 * Handle management frames: beacon and probe response
6948 */
6949 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6950 int active_network,
6951 struct libipw_network *network)
6952 {
6953 u32 size = sizeof(struct libipw_qos_parameters);
6954
6955 if (network->capability & WLAN_CAPABILITY_IBSS)
6956 network->qos_data.active = network->qos_data.supported;
6957
6958 if (network->flags & NETWORK_HAS_QOS_MASK) {
6959 if (active_network &&
6960 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6961 network->qos_data.active = network->qos_data.supported;
6962
6963 if ((network->qos_data.active == 1) && (active_network == 1) &&
6964 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6965 (network->qos_data.old_param_count !=
6966 network->qos_data.param_count)) {
6967 network->qos_data.old_param_count =
6968 network->qos_data.param_count;
6969 schedule_work(&priv->qos_activate);
6970 IPW_DEBUG_QOS("QoS parameters change call "
6971 "qos_activate\n");
6972 }
6973 } else {
6974 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6975 memcpy(&network->qos_data.parameters,
6976 &def_parameters_CCK, size);
6977 else
6978 memcpy(&network->qos_data.parameters,
6979 &def_parameters_OFDM, size);
6980
6981 if ((network->qos_data.active == 1) && (active_network == 1)) {
6982 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6983 schedule_work(&priv->qos_activate);
6984 }
6985
6986 network->qos_data.active = 0;
6987 network->qos_data.supported = 0;
6988 }
6989 if ((priv->status & STATUS_ASSOCIATED) &&
6990 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6991 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6992 if (network->capability & WLAN_CAPABILITY_IBSS)
6993 if ((network->ssid_len ==
6994 priv->assoc_network->ssid_len) &&
6995 !memcmp(network->ssid,
6996 priv->assoc_network->ssid,
6997 network->ssid_len)) {
6998 schedule_work(&priv->merge_networks);
6999 }
7000 }
7001
7002 return 0;
7003 }
7004
7005 /*
7006 * This function sets up the firmware to support QoS. It sends
7007 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
7008 */
7009 static int ipw_qos_activate(struct ipw_priv *priv,
7010 struct libipw_qos_data *qos_network_data)
7011 {
7012 int err;
7013 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7014 struct libipw_qos_parameters *active_one = NULL;
7015 u32 size = sizeof(struct libipw_qos_parameters);
7016 u32 burst_duration;
7017 int i;
7018 u8 type;
7019
7020 type = ipw_qos_current_mode(priv);
7021
7022 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7023 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7024 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7025 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7026
7027 if (qos_network_data == NULL) {
7028 if (type == IEEE_B) {
7029 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7030 active_one = &def_parameters_CCK;
7031 } else
7032 active_one = &def_parameters_OFDM;
7033
7034 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7035 burst_duration = ipw_qos_get_burst_duration(priv);
7036 for (i = 0; i < QOS_QUEUE_NUM; i++)
7037 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7038 cpu_to_le16(burst_duration);
7039 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7040 if (type == IEEE_B) {
7041 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7042 type);
7043 if (priv->qos_data.qos_enable == 0)
7044 active_one = &def_parameters_CCK;
7045 else
7046 active_one = priv->qos_data.def_qos_parm_CCK;
7047 } else {
7048 if (priv->qos_data.qos_enable == 0)
7049 active_one = &def_parameters_OFDM;
7050 else
7051 active_one = priv->qos_data.def_qos_parm_OFDM;
7052 }
7053 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7054 } else {
7055 unsigned long flags;
7056 int active;
7057
7058 spin_lock_irqsave(&priv->ieee->lock, flags);
7059 active_one = &(qos_network_data->parameters);
7060 qos_network_data->old_param_count =
7061 qos_network_data->param_count;
7062 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7063 active = qos_network_data->supported;
7064 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7065
7066 if (active == 0) {
7067 burst_duration = ipw_qos_get_burst_duration(priv);
7068 for (i = 0; i < QOS_QUEUE_NUM; i++)
7069 qos_parameters[QOS_PARAM_SET_ACTIVE].
7070 tx_op_limit[i] = cpu_to_le16(burst_duration);
7071 }
7072 }
7073
7074 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7075 err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
7076 if (err)
7077 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7078
7079 return err;
7080 }
7081
7082 /*
7083 * send IPW_CMD_WME_INFO to the firmware
7084 */
7085 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7086 {
7087 int ret = 0;
7088 struct libipw_qos_information_element qos_info;
7089
7090 if (priv == NULL)
7091 return -1;
7092
7093 qos_info.elementID = QOS_ELEMENT_ID;
7094 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7095
7096 qos_info.version = QOS_VERSION_1;
7097 qos_info.ac_info = 0;
7098
7099 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7100 qos_info.qui_type = QOS_OUI_TYPE;
7101 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7102
7103 ret = ipw_send_qos_info_command(priv, &qos_info);
7104 if (ret != 0) {
7105 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7106 }
7107 return ret;
7108 }
7109
7110 /*
7111 * Set the QoS parameter with the association request structure
7112 */
7113 static int ipw_qos_association(struct ipw_priv *priv,
7114 struct libipw_network *network)
7115 {
7116 int err = 0;
7117 struct libipw_qos_data *qos_data = NULL;
7118 struct libipw_qos_data ibss_data = {
7119 .supported = 1,
7120 .active = 1,
7121 };
7122
7123 switch (priv->ieee->iw_mode) {
7124 case IW_MODE_ADHOC:
7125 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7126
7127 qos_data = &ibss_data;
7128 break;
7129
7130 case IW_MODE_INFRA:
7131 qos_data = &network->qos_data;
7132 break;
7133
7134 default:
7135 BUG();
7136 break;
7137 }
7138
7139 err = ipw_qos_activate(priv, qos_data);
7140 if (err) {
7141 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7142 return err;
7143 }
7144
7145 if (priv->qos_data.qos_enable && qos_data->supported) {
7146 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7147 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7148 return ipw_qos_set_info_element(priv);
7149 }
7150
7151 return 0;
7152 }
7153
7154 /*
7155 * Handle beacon responses: if the network advertises a QoS setting
7156 * different from the one we associated with, adjust our QoS
7157 * setting
7158 */
7159 static int ipw_qos_association_resp(struct ipw_priv *priv,
7160 struct libipw_network *network)
7161 {
7162 int ret = 0;
7163 unsigned long flags;
7164 u32 size = sizeof(struct libipw_qos_parameters);
7165 int set_qos_param = 0;
7166
7167 if ((priv == NULL) || (network == NULL) ||
7168 (priv->assoc_network == NULL))
7169 return ret;
7170
7171 if (!(priv->status & STATUS_ASSOCIATED))
7172 return ret;
7173
7174 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7175 return ret;
7176
7177 spin_lock_irqsave(&priv->ieee->lock, flags);
7178 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7179 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7180 sizeof(struct libipw_qos_data));
7181 priv->assoc_network->qos_data.active = 1;
7182 if ((network->qos_data.old_param_count !=
7183 network->qos_data.param_count)) {
7184 set_qos_param = 1;
7185 network->qos_data.old_param_count =
7186 network->qos_data.param_count;
7187 }
7188
7189 } else {
7190 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7191 memcpy(&priv->assoc_network->qos_data.parameters,
7192 &def_parameters_CCK, size);
7193 else
7194 memcpy(&priv->assoc_network->qos_data.parameters,
7195 &def_parameters_OFDM, size);
7196 priv->assoc_network->qos_data.active = 0;
7197 priv->assoc_network->qos_data.supported = 0;
7198 set_qos_param = 1;
7199 }
7200
7201 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7202
7203 if (set_qos_param == 1)
7204 schedule_work(&priv->qos_activate);
7205
7206 return ret;
7207 }
7208
7209 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7210 {
7211 u32 ret = 0;
7212
7213 if ((priv == NULL))
7214 return 0;
7215
7216 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7217 ret = priv->qos_data.burst_duration_CCK;
7218 else
7219 ret = priv->qos_data.burst_duration_OFDM;
7220
7221 return ret;
7222 }
7223
7224 /*
7225 * Initialize the global QoS settings
7226 */
7227 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7228 int burst_enable, u32 burst_duration_CCK,
7229 u32 burst_duration_OFDM)
7230 {
7231 priv->qos_data.qos_enable = enable;
7232
7233 if (priv->qos_data.qos_enable) {
7234 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7235 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7236 IPW_DEBUG_QOS("QoS is enabled\n");
7237 } else {
7238 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7239 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7240 IPW_DEBUG_QOS("QoS is not enabled\n");
7241 }
7242
7243 priv->qos_data.burst_enable = burst_enable;
7244
7245 if (burst_enable) {
7246 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7247 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7248 } else {
7249 priv->qos_data.burst_duration_CCK = 0;
7250 priv->qos_data.burst_duration_OFDM = 0;
7251 }
7252 }
7253
7254 /*
7255 * map the packet priority to the right TX Queue
7256 */
7257 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7258 {
7259 if (priority > 7 || !priv->qos_data.qos_enable)
7260 priority = 0;
7261
7262 return from_priority_to_tx_queue[priority] - 1;
7263 }
7264
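/*
 * Decide whether a frame should be sent with QoS (summary of the checks
 * below): we must be associated, QoS must be enabled locally, and the
 * associated network must have QoS active; in ad-hoc mode, multicast
 * destinations force QoS off while unicast follows the network's
 * advertised support.
 */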
7265 static int ipw_is_qos_active(struct net_device *dev,
7266 struct sk_buff *skb)
7267 {
7268 struct ipw_priv *priv = libipw_priv(dev);
7269 struct libipw_qos_data *qos_data = NULL;
7270 int active, supported;
7271 u8 *daddr = skb->data + ETH_ALEN;
7272 int unicast = !is_multicast_ether_addr(daddr);
7273
7274 if (!(priv->status & STATUS_ASSOCIATED))
7275 return 0;
7276
7277 qos_data = &priv->assoc_network->qos_data;
7278
7279 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7280 if (unicast == 0)
7281 qos_data->active = 0;
7282 else
7283 qos_data->active = qos_data->supported;
7284 }
7285 active = qos_data->active;
7286 supported = qos_data->supported;
7287 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7288 "unicast %d\n",
7289 priv->qos_data.qos_enable, active, supported, unicast);
7290 if (active && priv->qos_data.qos_enable)
7291 return 1;
7292
7293 return 0;
7294
7295 }
7296 /*
7297 * add QoS parameter to the TX command
7298 */
7299 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7300 u16 priority,
7301 struct tfd_data *tfd)
7302 {
7303 int tx_queue_id = 0;
7304
7305
7306 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7307 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7308
7309 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7310 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7311 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7312 }
7313 return 0;
7314 }
7315
7316 /*
7317 * Background work handler to run QoS activation
7318 */
7319 static void ipw_bg_qos_activate(struct work_struct *work)
7320 {
7321 struct ipw_priv *priv =
7322 container_of(work, struct ipw_priv, qos_activate);
7323
7324 mutex_lock(&priv->mutex);
7325
7326 if (priv->status & STATUS_ASSOCIATED)
7327 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7328
7329 mutex_unlock(&priv->mutex);
7330 }
7331
7332 static int ipw_handle_probe_response(struct net_device *dev,
7333 struct libipw_probe_response *resp,
7334 struct libipw_network *network)
7335 {
7336 struct ipw_priv *priv = libipw_priv(dev);
7337 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7338 (network == priv->assoc_network));
7339
7340 ipw_qos_handle_probe_response(priv, active_network, network);
7341
7342 return 0;
7343 }
7344
7345 static int ipw_handle_beacon(struct net_device *dev,
7346 struct libipw_beacon *resp,
7347 struct libipw_network *network)
7348 {
7349 struct ipw_priv *priv = libipw_priv(dev);
7350 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7351 (network == priv->assoc_network));
7352
7353 ipw_qos_handle_probe_response(priv, active_network, network);
7354
7355 return 0;
7356 }
7357
7358 static int ipw_handle_assoc_response(struct net_device *dev,
7359 struct libipw_assoc_response *resp,
7360 struct libipw_network *network)
7361 {
7362 struct ipw_priv *priv = libipw_priv(dev);
7363 ipw_qos_association_resp(priv, network);
7364 return 0;
7365 }
7366
7367 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7368 *qos_param)
7369 {
7370 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7371 sizeof(*qos_param) * 3, qos_param);
7372 }
7373
7374 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7375 *qos_param)
7376 {
7377 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7378 qos_param);
7379 }
7380
7381 #endif /* CONFIG_IPW2200_QOS */
7382
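/*
 * Build and fire the association request for the chosen network (overview
 * of the steps below): apply fixed rates and adopt the network's ESSID if
 * none is pinned, pick the auth type (shared key / LEAP / open), select one
 * IEEE mode, set preamble and capability bits, send the SSID, supported
 * rates and system config, then issue HC_IBSS_START, HC_ASSOCIATE or
 * HC_REASSOCIATE as appropriate.
 */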
7383 static int ipw_associate_network(struct ipw_priv *priv,
7384 struct libipw_network *network,
7385 struct ipw_supported_rates *rates, int roaming)
7386 {
7387 int err;
7388 DECLARE_SSID_BUF(ssid);
7389
7390 if (priv->config & CFG_FIXED_RATE)
7391 ipw_set_fixed_rate(priv, network->mode);
7392
7393 if (!(priv->config & CFG_STATIC_ESSID)) {
7394 priv->essid_len = min(network->ssid_len,
7395 (u8) IW_ESSID_MAX_SIZE);
7396 memcpy(priv->essid, network->ssid, priv->essid_len);
7397 }
7398
7399 network->last_associate = jiffies;
7400
7401 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7402 priv->assoc_request.channel = network->channel;
7403 priv->assoc_request.auth_key = 0;
7404
7405 if ((priv->capability & CAP_PRIVACY_ON) &&
7406 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7407 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7408 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7409
7410 if (priv->ieee->sec.level == SEC_LEVEL_1)
7411 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7412
7413 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7414 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7415 priv->assoc_request.auth_type = AUTH_LEAP;
7416 else
7417 priv->assoc_request.auth_type = AUTH_OPEN;
7418
7419 if (priv->ieee->wpa_ie_len) {
7420 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7421 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7422 priv->ieee->wpa_ie_len);
7423 }
7424
7425 /*
7426 * It is valid for our ieee device to support multiple modes, but
7427 * when it comes to associating to a given network we have to choose
7428 * just one mode.
7429 */
7430 if (network->mode & priv->ieee->mode & IEEE_A)
7431 priv->assoc_request.ieee_mode = IPW_A_MODE;
7432 else if (network->mode & priv->ieee->mode & IEEE_G)
7433 priv->assoc_request.ieee_mode = IPW_G_MODE;
7434 else if (network->mode & priv->ieee->mode & IEEE_B)
7435 priv->assoc_request.ieee_mode = IPW_B_MODE;
7436
7437 priv->assoc_request.capability = cpu_to_le16(network->capability);
7438 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7439 && !(priv->config & CFG_PREAMBLE_LONG)) {
7440 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7441 } else {
7442 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7443
7444 /* Clear the short preamble if we won't be supporting it */
7445 priv->assoc_request.capability &=
7446 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7447 }
7448
7449 /* Clear capability bits that aren't used in Ad Hoc */
7450 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7451 priv->assoc_request.capability &=
7452 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7453
7454 IPW_DEBUG_ASSOC("%ssociation attempt: '%s', channel %d, "
7455 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7456 roaming ? "Rea" : "A",
7457 print_ssid(ssid, priv->essid, priv->essid_len),
7458 network->channel,
7459 ipw_modes[priv->assoc_request.ieee_mode],
7460 rates->num_rates,
7461 (priv->assoc_request.preamble_length ==
7462 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7463 network->capability &
7464 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7465 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7466 priv->capability & CAP_PRIVACY_ON ?
7467 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7468 "(open)") : "",
7469 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7470 priv->capability & CAP_PRIVACY_ON ?
7471 '1' + priv->ieee->sec.active_key : '.',
7472 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7473
7474 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7475 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7476 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7477 priv->assoc_request.assoc_type = HC_IBSS_START;
7478 priv->assoc_request.assoc_tsf_msw = 0;
7479 priv->assoc_request.assoc_tsf_lsw = 0;
7480 } else {
7481 if (unlikely(roaming))
7482 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7483 else
7484 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7485 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7486 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7487 }
7488
7489 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7490
7491 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7492 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7493 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7494 } else {
7495 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7496 priv->assoc_request.atim_window = 0;
7497 }
7498
7499 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7500
7501 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7502 if (err) {
7503 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7504 return err;
7505 }
7506
7507 rates->ieee_mode = priv->assoc_request.ieee_mode;
7508 rates->purpose = IPW_RATE_CONNECT;
7509 ipw_send_supported_rates(priv, rates);
7510
7511 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7512 priv->sys_config.dot11g_auto_detection = 1;
7513 else
7514 priv->sys_config.dot11g_auto_detection = 0;
7515
7516 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7517 priv->sys_config.answer_broadcast_ssid_probe = 1;
7518 else
7519 priv->sys_config.answer_broadcast_ssid_probe = 0;
7520
7521 err = ipw_send_system_config(priv);
7522 if (err) {
7523 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7524 return err;
7525 }
7526
7527 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7528 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7529 if (err) {
7530 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7531 return err;
7532 }
7533
7534 /*
7535 * If preemption is enabled, it is possible for the association
7536 * to complete before we return from ipw_send_associate. Therefore
7537 * we have to be sure to update our private data first.
7538 */
7539 priv->channel = network->channel;
7540 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7541 priv->status |= STATUS_ASSOCIATING;
7542 priv->status &= ~STATUS_SECURITY_UPDATED;
7543
7544 priv->assoc_network = network;
7545
7546 #ifdef CONFIG_IPW2200_QOS
7547 ipw_qos_association(priv, network);
7548 #endif
7549
7550 err = ipw_send_associate(priv, &priv->assoc_request);
7551 if (err) {
7552 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7553 return err;
7554 }
7555
7556 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7557 print_ssid(ssid, priv->essid, priv->essid_len),
7558 priv->bssid);
7559
7560 return 0;
7561 }
7562
7563 static void ipw_roam(void *data)
7564 {
7565 struct ipw_priv *priv = data;
7566 struct libipw_network *network = NULL;
7567 struct ipw_network_match match = {
7568 .network = priv->assoc_network
7569 };
7570
7571 /* The roaming process is as follows:
7572 *
7573 * 1. Missed beacon threshold triggers the roaming process by
7574 * setting the status ROAM bit and requesting a scan.
7575 * 2. When the scan completes, it schedules the ROAM work
7576 * 3. The ROAM work looks at all of the known networks for one that
7577 	 *    is a better network than the one currently associated with. If
7578 	 *    none is found, the ROAM process is over (ROAM bit cleared).
7579 * 4. If a better network is found, a disassociation request is
7580 * sent.
7581 * 5. When the disassociation completes, the roam work is again
7582 * scheduled. The second time through, the driver is no longer
7583 * associated, and the newly selected network is sent an
7584 * association request.
7585 	 * 6. At this point, the roaming process is complete and the ROAM
7586 * status bit is cleared.
7587 */
7588
7589 /* If we are no longer associated, and the roaming bit is no longer
7590 * set, then we are not actively roaming, so just return */
7591 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7592 return;
7593
7594 if (priv->status & STATUS_ASSOCIATED) {
7595 /* First pass through ROAM process -- look for a better
7596 * network */
7597 unsigned long flags;
7598 u8 rssi = priv->assoc_network->stats.rssi;
7599 priv->assoc_network->stats.rssi = -128;
7600 spin_lock_irqsave(&priv->ieee->lock, flags);
7601 list_for_each_entry(network, &priv->ieee->network_list, list) {
7602 if (network != priv->assoc_network)
7603 ipw_best_network(priv, &match, network, 1);
7604 }
7605 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7606 priv->assoc_network->stats.rssi = rssi;
7607
7608 if (match.network == priv->assoc_network) {
7609 IPW_DEBUG_ASSOC("No better APs in this network to "
7610 "roam to.\n");
7611 priv->status &= ~STATUS_ROAMING;
7612 ipw_debug_config(priv);
7613 return;
7614 }
7615
7616 ipw_send_disassociate(priv, 1);
7617 priv->assoc_network = match.network;
7618
7619 return;
7620 }
7621
7622 /* Second pass through ROAM process -- request association */
7623 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7624 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7625 priv->status &= ~STATUS_ROAMING;
7626 }
7627
7628 static void ipw_bg_roam(struct work_struct *work)
7629 {
7630 struct ipw_priv *priv =
7631 container_of(work, struct ipw_priv, roam);
7632 mutex_lock(&priv->mutex);
7633 ipw_roam(priv);
7634 mutex_unlock(&priv->mutex);
7635 }
7636
7637 static int ipw_associate(void *data)
7638 {
7639 struct ipw_priv *priv = data;
7640
7641 struct libipw_network *network = NULL;
7642 struct ipw_network_match match = {
7643 .network = NULL
7644 };
7645 struct ipw_supported_rates *rates;
7646 struct list_head *element;
7647 unsigned long flags;
7648 DECLARE_SSID_BUF(ssid);
7649
7650 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7651 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7652 return 0;
7653 }
7654
7655 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7656 IPW_DEBUG_ASSOC("Not attempting association (already in "
7657 "progress)\n");
7658 return 0;
7659 }
7660
7661 if (priv->status & STATUS_DISASSOCIATING) {
7662 IPW_DEBUG_ASSOC("Not attempting association (in "
7663 "disassociating)\n ");
7664 schedule_work(&priv->associate);
7665 return 0;
7666 }
7667
7668 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7669 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7670 "initialized)\n");
7671 return 0;
7672 }
7673
7674 if (!(priv->config & CFG_ASSOCIATE) &&
7675 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7676 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7677 return 0;
7678 }
7679
7680 /* Protect our use of the network_list */
7681 spin_lock_irqsave(&priv->ieee->lock, flags);
7682 list_for_each_entry(network, &priv->ieee->network_list, list)
7683 ipw_best_network(priv, &match, network, 0);
7684
7685 network = match.network;
7686 rates = &match.rates;
7687
7688 if (network == NULL &&
7689 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7690 priv->config & CFG_ADHOC_CREATE &&
7691 priv->config & CFG_STATIC_ESSID &&
7692 priv->config & CFG_STATIC_CHANNEL) {
7693 /* Use oldest network if the free list is empty */
7694 if (list_empty(&priv->ieee->network_free_list)) {
7695 struct libipw_network *oldest = NULL;
7696 struct libipw_network *target;
7697
7698 list_for_each_entry(target, &priv->ieee->network_list, list) {
7699 if ((oldest == NULL) ||
7700 (target->last_scanned < oldest->last_scanned))
7701 oldest = target;
7702 }
7703
7704 /* If there are no more slots, expire the oldest */
7705 list_del(&oldest->list);
7706 target = oldest;
7707 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7708 "network list.\n",
7709 print_ssid(ssid, target->ssid,
7710 target->ssid_len),
7711 target->bssid);
7712 list_add_tail(&target->list,
7713 &priv->ieee->network_free_list);
7714 }
7715
7716 element = priv->ieee->network_free_list.next;
7717 network = list_entry(element, struct libipw_network, list);
7718 ipw_adhoc_create(priv, network);
7719 rates = &priv->rates;
7720 list_del(element);
7721 list_add_tail(&network->list, &priv->ieee->network_list);
7722 }
7723 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7724
7725 /* If we reached the end of the list, then we don't have any valid
7726 * matching APs */
7727 if (!network) {
7728 ipw_debug_config(priv);
7729
7730 if (!(priv->status & STATUS_SCANNING)) {
7731 if (!(priv->config & CFG_SPEED_SCAN))
7732 schedule_delayed_work(&priv->request_scan,
7733 SCAN_INTERVAL);
7734 else
7735 schedule_delayed_work(&priv->request_scan, 0);
7736 }
7737
7738 return 0;
7739 }
7740
7741 ipw_associate_network(priv, network, rates, 0);
7742
7743 return 1;
7744 }
7745
7746 static void ipw_bg_associate(struct work_struct *work)
7747 {
7748 struct ipw_priv *priv =
7749 container_of(work, struct ipw_priv, associate);
7750 mutex_lock(&priv->mutex);
7751 ipw_associate(priv);
7752 mutex_unlock(&priv->mutex);
7753 }
7754
7755 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7756 struct sk_buff *skb)
7757 {
7758 struct ieee80211_hdr *hdr;
7759 u16 fc;
7760
7761 hdr = (struct ieee80211_hdr *)skb->data;
7762 fc = le16_to_cpu(hdr->frame_control);
7763 if (!(fc & IEEE80211_FCTL_PROTECTED))
7764 return;
7765
7766 fc &= ~IEEE80211_FCTL_PROTECTED;
7767 hdr->frame_control = cpu_to_le16(fc);
7768 switch (priv->ieee->sec.level) {
7769 case SEC_LEVEL_3:
7770 /* Remove CCMP HDR */
7771 memmove(skb->data + LIBIPW_3ADDR_LEN,
7772 skb->data + LIBIPW_3ADDR_LEN + 8,
7773 skb->len - LIBIPW_3ADDR_LEN - 8);
7774 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7775 break;
7776 case SEC_LEVEL_2:
7777 break;
7778 case SEC_LEVEL_1:
7779 /* Remove IV */
7780 memmove(skb->data + LIBIPW_3ADDR_LEN,
7781 skb->data + LIBIPW_3ADDR_LEN + 4,
7782 skb->len - LIBIPW_3ADDR_LEN - 4);
7783 skb_trim(skb, skb->len - 8); /* IV + ICV */
7784 break;
7785 case SEC_LEVEL_0:
7786 break;
7787 default:
7788 printk(KERN_ERR "Unknown security level %d\n",
7789 priv->ieee->sec.level);
7790 break;
7791 }
7792 }
7793
7794 static void ipw_handle_data_packet(struct ipw_priv *priv,
7795 struct ipw_rx_mem_buffer *rxb,
7796 struct libipw_rx_stats *stats)
7797 {
7798 struct net_device *dev = priv->net_dev;
7799 struct libipw_hdr_4addr *hdr;
7800 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7801
7802 /* We received data from the HW, so stop the watchdog */
7803 dev->trans_start = jiffies;
7804
7805 /* We only process data packets if the
7806 * interface is open */
7807 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7808 skb_tailroom(rxb->skb))) {
7809 dev->stats.rx_errors++;
7810 priv->wstats.discard.misc++;
7811 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7812 return;
7813 } else if (unlikely(!netif_running(priv->net_dev))) {
7814 dev->stats.rx_dropped++;
7815 priv->wstats.discard.misc++;
7816 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7817 return;
7818 }
7819
7820 /* Advance skb->data to the start of the actual payload */
7821 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7822
7823 /* Set the size of the skb to the size of the frame */
7824 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7825
7826 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7827
7828 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7829 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7830 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7831 (is_multicast_ether_addr(hdr->addr1) ?
7832 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7833 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7834
7835 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7836 dev->stats.rx_errors++;
7837 else { /* libipw_rx succeeded, so it now owns the SKB */
7838 rxb->skb = NULL;
7839 __ipw_led_activity_on(priv);
7840 }
7841 }
7842
7843 #ifdef CONFIG_IPW2200_RADIOTAP
7844 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7845 struct ipw_rx_mem_buffer *rxb,
7846 struct libipw_rx_stats *stats)
7847 {
7848 struct net_device *dev = priv->net_dev;
7849 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7850 struct ipw_rx_frame *frame = &pkt->u.frame;
7851
7852 /* initial pull of some data */
7853 u16 received_channel = frame->received_channel;
7854 u8 antennaAndPhy = frame->antennaAndPhy;
7855 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7856 u16 pktrate = frame->rate;
7857
7858 /* Magic struct that slots into the radiotap header -- no reason
7859 * to build this manually element by element, we can write it much
7860 * more efficiently than we can parse it. ORDER MATTERS HERE */
7861 struct ipw_rt_hdr *ipw_rt;
7862
7863 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7864
7865 /* We received data from the HW, so stop the watchdog */
7866 dev->trans_start = jiffies;
7867
7868 /* We only process data packets if the
7869 * interface is open */
7870 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7871 skb_tailroom(rxb->skb))) {
7872 dev->stats.rx_errors++;
7873 priv->wstats.discard.misc++;
7874 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7875 return;
7876 } else if (unlikely(!netif_running(priv->net_dev))) {
7877 dev->stats.rx_dropped++;
7878 priv->wstats.discard.misc++;
7879 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7880 return;
7881 }
7882
7883 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7884 * that now */
7885 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7886 /* FIXME: Should alloc bigger skb instead */
7887 dev->stats.rx_dropped++;
7888 priv->wstats.discard.misc++;
7889 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7890 return;
7891 }
7892
7893 /* copy the frame itself */
7894 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7895 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7896
7897 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7898
7899 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7900 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7901 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7902
7903 /* Big bitfield of all the fields we provide in radiotap */
7904 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7905 (1 << IEEE80211_RADIOTAP_TSFT) |
7906 (1 << IEEE80211_RADIOTAP_FLAGS) |
7907 (1 << IEEE80211_RADIOTAP_RATE) |
7908 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7909 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7910 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7911 (1 << IEEE80211_RADIOTAP_ANTENNA));
7912
7913 /* Zero the flags, we'll add to them as we go */
7914 ipw_rt->rt_flags = 0;
7915 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7916 frame->parent_tsf[2] << 16 |
7917 frame->parent_tsf[1] << 8 |
7918 frame->parent_tsf[0]);
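	/* Worked example, for illustration only (not driver code): parent_tsf[]
	 * is little-endian, so bytes {0x78, 0x56, 0x34, 0x12} assemble to
	 * rt_tsf = 0x12345678; only the low 32 bits of the TSF appear to be
	 * reported by the firmware here. */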
7919
7920 /* Convert signal to DBM */
7921 ipw_rt->rt_dbmsignal = antsignal;
7922 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7923
7924 /* Convert the channel data and set the flags */
7925 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7926 if (received_channel > 14) { /* 802.11a */
7927 ipw_rt->rt_chbitmask =
7928 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7929 } else if (antennaAndPhy & 32) { /* 802.11b */
7930 ipw_rt->rt_chbitmask =
7931 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7932 } else { /* 802.11g */
7933 ipw_rt->rt_chbitmask =
7934 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7935 }
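	/* Worked example, for illustration only: ieee80211chan2mhz() maps
	 * 2.4 GHz channel 6 to 2437 MHz and 5 GHz channel 36 to 5180 MHz,
	 * which is why channels above 14 are flagged as 5 GHz/OFDM above. */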
7936
7937 /* set the rate in multiples of 500k/s */
7938 switch (pktrate) {
7939 case IPW_TX_RATE_1MB:
7940 ipw_rt->rt_rate = 2;
7941 break;
7942 case IPW_TX_RATE_2MB:
7943 ipw_rt->rt_rate = 4;
7944 break;
7945 case IPW_TX_RATE_5MB:
7946 ipw_rt->rt_rate = 10;
7947 break;
7948 case IPW_TX_RATE_6MB:
7949 ipw_rt->rt_rate = 12;
7950 break;
7951 case IPW_TX_RATE_9MB:
7952 ipw_rt->rt_rate = 18;
7953 break;
7954 case IPW_TX_RATE_11MB:
7955 ipw_rt->rt_rate = 22;
7956 break;
7957 case IPW_TX_RATE_12MB:
7958 ipw_rt->rt_rate = 24;
7959 break;
7960 case IPW_TX_RATE_18MB:
7961 ipw_rt->rt_rate = 36;
7962 break;
7963 case IPW_TX_RATE_24MB:
7964 ipw_rt->rt_rate = 48;
7965 break;
7966 case IPW_TX_RATE_36MB:
7967 ipw_rt->rt_rate = 72;
7968 break;
7969 case IPW_TX_RATE_48MB:
7970 ipw_rt->rt_rate = 96;
7971 break;
7972 case IPW_TX_RATE_54MB:
7973 ipw_rt->rt_rate = 108;
7974 break;
7975 default:
7976 ipw_rt->rt_rate = 0;
7977 break;
7978 }
7979
7980 /* antenna number */
7981 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7982
7983 /* set the preamble flag if we have it */
7984 if ((antennaAndPhy & 64))
7985 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7986
7987 /* Set the size of the skb to the size of the frame */
7988 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7989
7990 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7991
7992 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7993 dev->stats.rx_errors++;
7994 else { /* libipw_rx succeeded, so it now owns the SKB */
7995 rxb->skb = NULL;
7996 /* no LED during capture */
7997 }
7998 }
7999 #endif
8000
8001 #ifdef CONFIG_IPW2200_PROMISCUOUS
8002 #define libipw_is_probe_response(fc) \
8003 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
8004 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
8005
8006 #define libipw_is_management(fc) \
8007 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
8008
8009 #define libipw_is_control(fc) \
8010 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
8011
8012 #define libipw_is_data(fc) \
8013 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8014
8015 #define libipw_is_assoc_request(fc) \
8016 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8017
8018 #define libipw_is_reassoc_request(fc) \
8019 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8020
8021 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8022 struct ipw_rx_mem_buffer *rxb,
8023 struct libipw_rx_stats *stats)
8024 {
8025 struct net_device *dev = priv->prom_net_dev;
8026 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8027 struct ipw_rx_frame *frame = &pkt->u.frame;
8028 struct ipw_rt_hdr *ipw_rt;
8029
8030 /* First cache any information we need before we overwrite
8031 * the information provided in the skb from the hardware */
8032 struct ieee80211_hdr *hdr;
8033 u16 channel = frame->received_channel;
8034 u8 phy_flags = frame->antennaAndPhy;
8035 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8036 s8 noise = (s8) le16_to_cpu(frame->noise);
8037 u8 rate = frame->rate;
8038 unsigned short len = le16_to_cpu(pkt->u.frame.length);
8039 struct sk_buff *skb;
8040 int hdr_only = 0;
8041 u16 filter = priv->prom_priv->filter;
8042
8043 /* If the filter is set to not include Rx frames then return */
8044 if (filter & IPW_PROM_NO_RX)
8045 return;
8046
8047 /* We received data from the HW, so stop the watchdog */
8048 dev->trans_start = jiffies;
8049
8050 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8051 dev->stats.rx_errors++;
8052 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8053 return;
8054 }
8055
8056 /* We only process data packets if the interface is open */
8057 if (unlikely(!netif_running(dev))) {
8058 dev->stats.rx_dropped++;
8059 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8060 return;
8061 }
8062
8063 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8064 * that now */
8065 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8066 /* FIXME: Should alloc bigger skb instead */
8067 dev->stats.rx_dropped++;
8068 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8069 return;
8070 }
8071
8072 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8073 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8074 if (filter & IPW_PROM_NO_MGMT)
8075 return;
8076 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8077 hdr_only = 1;
8078 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8079 if (filter & IPW_PROM_NO_CTL)
8080 return;
8081 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8082 hdr_only = 1;
8083 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8084 if (filter & IPW_PROM_NO_DATA)
8085 return;
8086 if (filter & IPW_PROM_DATA_HEADER_ONLY)
8087 hdr_only = 1;
8088 }
8089
8090 /* Copy the SKB since this is for the promiscuous side */
8091 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8092 if (skb == NULL) {
8093 		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8094 return;
8095 }
8096
8097 	/* the frame data will be copied in just after the radiotap header */
8098 ipw_rt = (void *)skb->data;
8099
8100 if (hdr_only)
8101 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8102
8103 memcpy(ipw_rt->payload, hdr, len);
8104
8105 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8106 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8107 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8108
8109 /* Set the size of the skb to the size of the frame */
8110 skb_put(skb, sizeof(*ipw_rt) + len);
8111
8112 /* Big bitfield of all the fields we provide in radiotap */
8113 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8114 (1 << IEEE80211_RADIOTAP_TSFT) |
8115 (1 << IEEE80211_RADIOTAP_FLAGS) |
8116 (1 << IEEE80211_RADIOTAP_RATE) |
8117 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8118 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8119 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8120 (1 << IEEE80211_RADIOTAP_ANTENNA));
8121
8122 /* Zero the flags, we'll add to them as we go */
8123 ipw_rt->rt_flags = 0;
8124 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8125 frame->parent_tsf[2] << 16 |
8126 frame->parent_tsf[1] << 8 |
8127 frame->parent_tsf[0]);
8128
8129 /* Convert to DBM */
8130 ipw_rt->rt_dbmsignal = signal;
8131 ipw_rt->rt_dbmnoise = noise;
8132
8133 /* Convert the channel data and set the flags */
8134 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8135 if (channel > 14) { /* 802.11a */
8136 ipw_rt->rt_chbitmask =
8137 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8138 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8139 ipw_rt->rt_chbitmask =
8140 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8141 } else { /* 802.11g */
8142 ipw_rt->rt_chbitmask =
8143 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8144 }
8145
8146 /* set the rate in multiples of 500k/s */
8147 switch (rate) {
8148 case IPW_TX_RATE_1MB:
8149 ipw_rt->rt_rate = 2;
8150 break;
8151 case IPW_TX_RATE_2MB:
8152 ipw_rt->rt_rate = 4;
8153 break;
8154 case IPW_TX_RATE_5MB:
8155 ipw_rt->rt_rate = 10;
8156 break;
8157 case IPW_TX_RATE_6MB:
8158 ipw_rt->rt_rate = 12;
8159 break;
8160 case IPW_TX_RATE_9MB:
8161 ipw_rt->rt_rate = 18;
8162 break;
8163 case IPW_TX_RATE_11MB:
8164 ipw_rt->rt_rate = 22;
8165 break;
8166 case IPW_TX_RATE_12MB:
8167 ipw_rt->rt_rate = 24;
8168 break;
8169 case IPW_TX_RATE_18MB:
8170 ipw_rt->rt_rate = 36;
8171 break;
8172 case IPW_TX_RATE_24MB:
8173 ipw_rt->rt_rate = 48;
8174 break;
8175 case IPW_TX_RATE_36MB:
8176 ipw_rt->rt_rate = 72;
8177 break;
8178 case IPW_TX_RATE_48MB:
8179 ipw_rt->rt_rate = 96;
8180 break;
8181 case IPW_TX_RATE_54MB:
8182 ipw_rt->rt_rate = 108;
8183 break;
8184 default:
8185 ipw_rt->rt_rate = 0;
8186 break;
8187 }
8188
8189 /* antenna number */
8190 ipw_rt->rt_antenna = (phy_flags & 3);
8191
8192 /* set the preamble flag if we have it */
8193 if (phy_flags & (1 << 6))
8194 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8195
8196 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8197
8198 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8199 dev->stats.rx_errors++;
8200 dev_kfree_skb_any(skb);
8201 }
8202 }
8203 #endif
8204
8205 static int is_network_packet(struct ipw_priv *priv,
8206 struct libipw_hdr_4addr *header)
8207 {
8208 /* Filter incoming packets to determine if they are targeted toward
8209 * this network, discarding packets coming from ourselves */
8210 switch (priv->ieee->iw_mode) {
8211 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8212 /* packets from our adapter are dropped (echo) */
8213 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8214 return 0;
8215
8216 /* {broad,multi}cast packets to our BSSID go through */
8217 if (is_multicast_ether_addr(header->addr1))
8218 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8219
8220 /* packets to our adapter go through */
8221 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8222 ETH_ALEN);
8223
8224 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8225 /* packets from our adapter are dropped (echo) */
8226 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8227 return 0;
8228
8229 /* {broad,multi}cast packets to our BSS go through */
8230 if (is_multicast_ether_addr(header->addr1))
8231 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8232
8233 /* packets to our adapter go through */
8234 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8235 ETH_ALEN);
8236 }
8237
8238 return 1;
8239 }
8240
8241 #define IPW_PACKET_RETRY_TIME HZ
8242
8243 static int is_duplicate_packet(struct ipw_priv *priv,
8244 struct libipw_hdr_4addr *header)
8245 {
8246 u16 sc = le16_to_cpu(header->seq_ctl);
8247 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8248 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8249 u16 *last_seq, *last_frag;
8250 unsigned long *last_time;
8251
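	/* Worked example, for illustration only: the 802.11 sequence control
	 * field packs the fragment number in bits 0-3 and the sequence number
	 * in bits 4-15, so seq_ctl = 0x01a5 decodes to frag = 5, seq = 0x1a
	 * (26); this is what WLAN_GET_SEQ_SEQ()/WLAN_GET_SEQ_FRAG() above
	 * extract. */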
8252 switch (priv->ieee->iw_mode) {
8253 case IW_MODE_ADHOC:
8254 {
8255 struct list_head *p;
8256 struct ipw_ibss_seq *entry = NULL;
8257 u8 *mac = header->addr2;
8258 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8259
8260 list_for_each(p, &priv->ibss_mac_hash[index]) {
8261 entry =
8262 list_entry(p, struct ipw_ibss_seq, list);
8263 if (!memcmp(entry->mac, mac, ETH_ALEN))
8264 break;
8265 }
8266 if (p == &priv->ibss_mac_hash[index]) {
8267 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8268 if (!entry) {
8269 IPW_ERROR
8270 ("Cannot malloc new mac entry\n");
8271 return 0;
8272 }
8273 memcpy(entry->mac, mac, ETH_ALEN);
8274 entry->seq_num = seq;
8275 entry->frag_num = frag;
8276 entry->packet_time = jiffies;
8277 list_add(&entry->list,
8278 &priv->ibss_mac_hash[index]);
8279 return 0;
8280 }
8281 last_seq = &entry->seq_num;
8282 last_frag = &entry->frag_num;
8283 last_time = &entry->packet_time;
8284 break;
8285 }
8286 case IW_MODE_INFRA:
8287 last_seq = &priv->last_seq_num;
8288 last_frag = &priv->last_frag_num;
8289 last_time = &priv->last_packet_time;
8290 break;
8291 default:
8292 return 0;
8293 }
8294 if ((*last_seq == seq) &&
8295 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8296 if (*last_frag == frag)
8297 goto drop;
8298 if (*last_frag + 1 != frag)
8299 /* out-of-order fragment */
8300 goto drop;
8301 } else
8302 *last_seq = seq;
8303
8304 *last_frag = frag;
8305 *last_time = jiffies;
8306 return 0;
8307
8308 drop:
8309 	/* This check is commented out because we have observed the card
8310 	 * receiving duplicate packets without the FCTL_RETRY bit set, in
8311 	 * IBSS mode with fragmentation enabled.
8312 	   BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8313 return 1;
8314 }
8315
8316 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8317 struct ipw_rx_mem_buffer *rxb,
8318 struct libipw_rx_stats *stats)
8319 {
8320 struct sk_buff *skb = rxb->skb;
8321 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8322 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8323 (skb->data + IPW_RX_FRAME_SIZE);
8324
8325 libipw_rx_mgt(priv->ieee, header, stats);
8326
8327 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8328 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8329 IEEE80211_STYPE_PROBE_RESP) ||
8330 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8331 IEEE80211_STYPE_BEACON))) {
8332 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8333 ipw_add_station(priv, header->addr2);
8334 }
8335
8336 if (priv->config & CFG_NET_STATS) {
8337 IPW_DEBUG_HC("sending stat packet\n");
8338
8339 /* Set the size of the skb to the size of the full
8340 * ipw header and 802.11 frame */
8341 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8342 IPW_RX_FRAME_SIZE);
8343
8344 /* Advance past the ipw packet header to the 802.11 frame */
8345 skb_pull(skb, IPW_RX_FRAME_SIZE);
8346
8347 /* Push the libipw_rx_stats before the 802.11 frame */
8348 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8349
8350 skb->dev = priv->ieee->dev;
8351
8352 /* Point raw at the libipw_stats */
8353 skb_reset_mac_header(skb);
8354
8355 skb->pkt_type = PACKET_OTHERHOST;
8356 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8357 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8358 netif_rx(skb);
8359 rxb->skb = NULL;
8360 }
8361 }
8362
8363 /*
8364  * Main entry function for receiving a packet with 802.11 headers. This
8365  * should be called whenever the FW has notified us that there is a new
8366 * skb in the receive queue.
8367 */
8368 static void ipw_rx(struct ipw_priv *priv)
8369 {
8370 struct ipw_rx_mem_buffer *rxb;
8371 struct ipw_rx_packet *pkt;
8372 struct libipw_hdr_4addr *header;
8373 u32 r, w, i;
8374 u8 network_packet;
8375 u8 fill_rx = 0;
8376
8377 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8378 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8379 i = priv->rxq->read;
8380
8381 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8382 fill_rx = 1;
8383
8384 while (i != r) {
8385 rxb = priv->rxq->queue[i];
8386 if (unlikely(rxb == NULL)) {
8387 printk(KERN_CRIT "Queue not allocated!\n");
8388 break;
8389 }
8390 priv->rxq->queue[i] = NULL;
8391
8392 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8393 IPW_RX_BUF_SIZE,
8394 PCI_DMA_FROMDEVICE);
8395
8396 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8397 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8398 pkt->header.message_type,
8399 pkt->header.rx_seq_num, pkt->header.control_bits);
8400
8401 switch (pkt->header.message_type) {
8402 case RX_FRAME_TYPE: /* 802.11 frame */ {
8403 struct libipw_rx_stats stats = {
8404 .rssi = pkt->u.frame.rssi_dbm -
8405 IPW_RSSI_TO_DBM,
8406 .signal =
8407 pkt->u.frame.rssi_dbm -
8408 IPW_RSSI_TO_DBM + 0x100,
8409 .noise =
8410 le16_to_cpu(pkt->u.frame.noise),
8411 .rate = pkt->u.frame.rate,
8412 .mac_time = jiffies,
8413 .received_channel =
8414 pkt->u.frame.received_channel,
8415 .freq =
8416 (pkt->u.frame.
8417 control & (1 << 0)) ?
8418 LIBIPW_24GHZ_BAND :
8419 LIBIPW_52GHZ_BAND,
8420 .len = le16_to_cpu(pkt->u.frame.length),
8421 };
8422
8423 if (stats.rssi != 0)
8424 stats.mask |= LIBIPW_STATMASK_RSSI;
8425 if (stats.signal != 0)
8426 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8427 if (stats.noise != 0)
8428 stats.mask |= LIBIPW_STATMASK_NOISE;
8429 if (stats.rate != 0)
8430 stats.mask |= LIBIPW_STATMASK_RATE;
8431
8432 priv->rx_packets++;
8433
8434 #ifdef CONFIG_IPW2200_PROMISCUOUS
8435 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8436 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8437 #endif
8438
8439 #ifdef CONFIG_IPW2200_MONITOR
8440 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8441 #ifdef CONFIG_IPW2200_RADIOTAP
8442
8443 ipw_handle_data_packet_monitor(priv,
8444 rxb,
8445 &stats);
8446 #else
8447 ipw_handle_data_packet(priv, rxb,
8448 &stats);
8449 #endif
8450 break;
8451 }
8452 #endif
8453
8454 header =
8455 (struct libipw_hdr_4addr *)(rxb->skb->
8456 data +
8457 IPW_RX_FRAME_SIZE);
8458 /* TODO: Check Ad-Hoc dest/source and make sure
8459 * that we are actually parsing these packets
8460 * correctly -- we should probably use the
8461 * frame control of the packet and disregard
8462 * the current iw_mode */
8463
8464 network_packet =
8465 is_network_packet(priv, header);
8466 if (network_packet && priv->assoc_network) {
8467 priv->assoc_network->stats.rssi =
8468 stats.rssi;
8469 priv->exp_avg_rssi =
8470 exponential_average(priv->exp_avg_rssi,
8471 stats.rssi, DEPTH_RSSI);
8472 }
8473
8474 IPW_DEBUG_RX("Frame: len=%u\n",
8475 le16_to_cpu(pkt->u.frame.length));
8476
8477 if (le16_to_cpu(pkt->u.frame.length) <
8478 libipw_get_hdrlen(le16_to_cpu(
8479 header->frame_ctl))) {
8480 IPW_DEBUG_DROP
8481 ("Received packet is too small. "
8482 "Dropping.\n");
8483 priv->net_dev->stats.rx_errors++;
8484 priv->wstats.discard.misc++;
8485 break;
8486 }
8487
8488 switch (WLAN_FC_GET_TYPE
8489 (le16_to_cpu(header->frame_ctl))) {
8490
8491 case IEEE80211_FTYPE_MGMT:
8492 ipw_handle_mgmt_packet(priv, rxb,
8493 &stats);
8494 break;
8495
8496 case IEEE80211_FTYPE_CTL:
8497 break;
8498
8499 case IEEE80211_FTYPE_DATA:
8500 if (unlikely(!network_packet ||
8501 is_duplicate_packet(priv,
8502 header)))
8503 {
8504 IPW_DEBUG_DROP("Dropping: "
8505 "%pM, "
8506 "%pM, "
8507 "%pM\n",
8508 header->addr1,
8509 header->addr2,
8510 header->addr3);
8511 break;
8512 }
8513
8514 ipw_handle_data_packet(priv, rxb,
8515 &stats);
8516
8517 break;
8518 }
8519 break;
8520 }
8521
8522 case RX_HOST_NOTIFICATION_TYPE:{
8523 IPW_DEBUG_RX
8524 ("Notification: subtype=%02X flags=%02X size=%d\n",
8525 pkt->u.notification.subtype,
8526 pkt->u.notification.flags,
8527 le16_to_cpu(pkt->u.notification.size));
8528 ipw_rx_notification(priv, &pkt->u.notification);
8529 break;
8530 }
8531
8532 default:
8533 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8534 pkt->header.message_type);
8535 break;
8536 }
8537
8538 /* For now we just don't re-use anything. We can tweak this
8539 * later to try and re-use notification packets and SKBs that
8540 * fail to Rx correctly */
8541 if (rxb->skb != NULL) {
8542 dev_kfree_skb_any(rxb->skb);
8543 rxb->skb = NULL;
8544 }
8545
8546 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8547 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8548 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8549
8550 i = (i + 1) % RX_QUEUE_SIZE;
8551
8552 		/* If there are a lot of unused frames, restock the Rx queue
8553 * so the ucode won't assert */
8554 if (fill_rx) {
8555 priv->rxq->read = i;
8556 ipw_rx_queue_replenish(priv);
8557 }
8558 }
8559
8560 /* Backtrack one entry */
8561 priv->rxq->read = i;
8562 ipw_rx_queue_restock(priv);
8563 }
8564
8565 #define DEFAULT_RTS_THRESHOLD 2304U
8566 #define MIN_RTS_THRESHOLD 1U
8567 #define MAX_RTS_THRESHOLD 2304U
8568 #define DEFAULT_BEACON_INTERVAL 100U
8569 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8570 #define DEFAULT_LONG_RETRY_LIMIT 4U
8571
8572 /**
8573 * ipw_sw_reset
8574 * @option: options to control different reset behaviour
8575 * 0 = reset everything except the 'disable' module_param
8576 * 1 = reset everything and print out driver info (for probe only)
8577 * 2 = reset everything
8578 */
8579 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8580 {
8581 int band, modulation;
8582 int old_mode = priv->ieee->iw_mode;
8583
8584 /* Initialize module parameter values here */
8585 priv->config = 0;
8586
8587 /* We default to disabling the LED code as right now it causes
8588 * too many systems to lock up... */
8589 if (!led_support)
8590 priv->config |= CFG_NO_LED;
8591
8592 if (associate)
8593 priv->config |= CFG_ASSOCIATE;
8594 else
8595 IPW_DEBUG_INFO("Auto associate disabled.\n");
8596
8597 if (auto_create)
8598 priv->config |= CFG_ADHOC_CREATE;
8599 else
8600 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8601
8602 priv->config &= ~CFG_STATIC_ESSID;
8603 priv->essid_len = 0;
8604 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8605
8606 if (disable && option) {
8607 priv->status |= STATUS_RF_KILL_SW;
8608 IPW_DEBUG_INFO("Radio disabled.\n");
8609 }
8610
8611 if (default_channel != 0) {
8612 priv->config |= CFG_STATIC_CHANNEL;
8613 priv->channel = default_channel;
8614 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8615 /* TODO: Validate that provided channel is in range */
8616 }
8617 #ifdef CONFIG_IPW2200_QOS
8618 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8619 burst_duration_CCK, burst_duration_OFDM);
8620 #endif /* CONFIG_IPW2200_QOS */
8621
8622 switch (network_mode) {
8623 case 1:
8624 priv->ieee->iw_mode = IW_MODE_ADHOC;
8625 priv->net_dev->type = ARPHRD_ETHER;
8626
8627 break;
8628 #ifdef CONFIG_IPW2200_MONITOR
8629 case 2:
8630 priv->ieee->iw_mode = IW_MODE_MONITOR;
8631 #ifdef CONFIG_IPW2200_RADIOTAP
8632 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8633 #else
8634 priv->net_dev->type = ARPHRD_IEEE80211;
8635 #endif
8636 break;
8637 #endif
8638 default:
8639 case 0:
8640 priv->net_dev->type = ARPHRD_ETHER;
8641 priv->ieee->iw_mode = IW_MODE_INFRA;
8642 break;
8643 }
8644
8645 if (hwcrypto) {
8646 priv->ieee->host_encrypt = 0;
8647 priv->ieee->host_encrypt_msdu = 0;
8648 priv->ieee->host_decrypt = 0;
8649 priv->ieee->host_mc_decrypt = 0;
8650 }
8651 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8652
8653 	/* The IPW2200/2915 is able to do hardware fragmentation. */
8654 priv->ieee->host_open_frag = 0;
8655
8656 if ((priv->pci_dev->device == 0x4223) ||
8657 (priv->pci_dev->device == 0x4224)) {
8658 if (option == 1)
8659 printk(KERN_INFO DRV_NAME
8660 ": Detected Intel PRO/Wireless 2915ABG Network "
8661 "Connection\n");
8662 priv->ieee->abg_true = 1;
8663 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8664 modulation = LIBIPW_OFDM_MODULATION |
8665 LIBIPW_CCK_MODULATION;
8666 priv->adapter = IPW_2915ABG;
8667 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8668 } else {
8669 if (option == 1)
8670 printk(KERN_INFO DRV_NAME
8671 ": Detected Intel PRO/Wireless 2200BG Network "
8672 "Connection\n");
8673
8674 priv->ieee->abg_true = 0;
8675 band = LIBIPW_24GHZ_BAND;
8676 modulation = LIBIPW_OFDM_MODULATION |
8677 LIBIPW_CCK_MODULATION;
8678 priv->adapter = IPW_2200BG;
8679 priv->ieee->mode = IEEE_G | IEEE_B;
8680 }
8681
8682 priv->ieee->freq_band = band;
8683 priv->ieee->modulation = modulation;
8684
8685 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8686
8687 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8688 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8689
8690 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8691 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8692 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8693
8694 /* If power management is turned on, default to AC mode */
8695 priv->power_mode = IPW_POWER_AC;
8696 priv->tx_power = IPW_TX_POWER_DEFAULT;
8697
8698 return old_mode == priv->ieee->iw_mode;
8699 }
8700
8701 /*
8702 * This file defines the Wireless Extension handlers. It does not
8703 * define any methods of hardware manipulation and relies on the
8704 * functions defined in ipw_main to provide the HW interaction.
8705 *
8706  * The exception to this is ipw_get_ordinal(), which is used to poll
8707  * the hardware rather than making unnecessary calls.
8708 *
8709 */
8710
8711 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8712 {
8713 if (channel == 0) {
8714 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8715 priv->config &= ~CFG_STATIC_CHANNEL;
8716 IPW_DEBUG_ASSOC("Attempting to associate with new "
8717 "parameters.\n");
8718 ipw_associate(priv);
8719 return 0;
8720 }
8721
8722 priv->config |= CFG_STATIC_CHANNEL;
8723
8724 if (priv->channel == channel) {
8725 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8726 channel);
8727 return 0;
8728 }
8729
8730 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8731 priv->channel = channel;
8732
8733 #ifdef CONFIG_IPW2200_MONITOR
8734 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8735 int i;
8736 if (priv->status & STATUS_SCANNING) {
8737 IPW_DEBUG_SCAN("Scan abort triggered due to "
8738 "channel change.\n");
8739 ipw_abort_scan(priv);
8740 }
8741
8742 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8743 udelay(10);
8744
8745 if (priv->status & STATUS_SCANNING)
8746 IPW_DEBUG_SCAN("Still scanning...\n");
8747 else
8748 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8749 1000 - i);
8750
8751 return 0;
8752 }
8753 #endif /* CONFIG_IPW2200_MONITOR */
8754
8755 /* Network configuration changed -- force [re]association */
8756 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8757 if (!ipw_disassociate(priv))
8758 ipw_associate(priv);
8759
8760 return 0;
8761 }
8762
8763 static int ipw_wx_set_freq(struct net_device *dev,
8764 struct iw_request_info *info,
8765 union iwreq_data *wrqu, char *extra)
8766 {
8767 struct ipw_priv *priv = libipw_priv(dev);
8768 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8769 struct iw_freq *fwrq = &wrqu->freq;
8770 int ret = 0, i;
8771 u8 channel, flags;
8772 int band;
8773
8774 if (fwrq->m == 0) {
8775 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8776 mutex_lock(&priv->mutex);
8777 ret = ipw_set_channel(priv, 0);
8778 mutex_unlock(&priv->mutex);
8779 return ret;
8780 }
8781 /* if setting by freq convert to channel */
8782 if (fwrq->e == 1) {
8783 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8784 if (channel == 0)
8785 return -EINVAL;
8786 } else
8787 channel = fwrq->m;
8788
8789 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8790 return -EINVAL;
8791
8792 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8793 i = libipw_channel_to_index(priv->ieee, channel);
8794 if (i == -1)
8795 return -EINVAL;
8796
8797 flags = (band == LIBIPW_24GHZ_BAND) ?
8798 geo->bg[i].flags : geo->a[i].flags;
8799 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8800 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8801 return -EINVAL;
8802 }
8803 }
8804
8805 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8806 mutex_lock(&priv->mutex);
8807 ret = ipw_set_channel(priv, channel);
8808 mutex_unlock(&priv->mutex);
8809 return ret;
8810 }
8811
8812 static int ipw_wx_get_freq(struct net_device *dev,
8813 struct iw_request_info *info,
8814 union iwreq_data *wrqu, char *extra)
8815 {
8816 struct ipw_priv *priv = libipw_priv(dev);
8817
8818 wrqu->freq.e = 0;
8819
8820 /* If we are associated, trying to associate, or have a statically
8821 * configured CHANNEL then return that; otherwise return ANY */
8822 mutex_lock(&priv->mutex);
8823 if (priv->config & CFG_STATIC_CHANNEL ||
8824 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8825 int i;
8826
8827 i = libipw_channel_to_index(priv->ieee, priv->channel);
8828 BUG_ON(i == -1);
8829 wrqu->freq.e = 1;
8830
8831 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8832 case LIBIPW_52GHZ_BAND:
8833 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8834 break;
8835
8836 case LIBIPW_24GHZ_BAND:
8837 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8838 break;
8839
8840 default:
8841 BUG();
8842 }
8843 } else
8844 wrqu->freq.m = 0;
8845
8846 mutex_unlock(&priv->mutex);
8847 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8848 return 0;
8849 }
8850
8851 static int ipw_wx_set_mode(struct net_device *dev,
8852 struct iw_request_info *info,
8853 union iwreq_data *wrqu, char *extra)
8854 {
8855 struct ipw_priv *priv = libipw_priv(dev);
8856 int err = 0;
8857
8858 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8859
8860 switch (wrqu->mode) {
8861 #ifdef CONFIG_IPW2200_MONITOR
8862 case IW_MODE_MONITOR:
8863 #endif
8864 case IW_MODE_ADHOC:
8865 case IW_MODE_INFRA:
8866 break;
8867 case IW_MODE_AUTO:
8868 wrqu->mode = IW_MODE_INFRA;
8869 break;
8870 default:
8871 return -EINVAL;
8872 }
8873 if (wrqu->mode == priv->ieee->iw_mode)
8874 return 0;
8875
8876 mutex_lock(&priv->mutex);
8877
8878 ipw_sw_reset(priv, 0);
8879
8880 #ifdef CONFIG_IPW2200_MONITOR
8881 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8882 priv->net_dev->type = ARPHRD_ETHER;
8883
8884 if (wrqu->mode == IW_MODE_MONITOR)
8885 #ifdef CONFIG_IPW2200_RADIOTAP
8886 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8887 #else
8888 priv->net_dev->type = ARPHRD_IEEE80211;
8889 #endif
8890 #endif /* CONFIG_IPW2200_MONITOR */
8891
8892 /* Free the existing firmware and reset the fw_loaded
8893 * flag so ipw_load() will bring in the new firmware */
8894 free_firmware();
8895
8896 priv->ieee->iw_mode = wrqu->mode;
8897
8898 schedule_work(&priv->adapter_restart);
8899 mutex_unlock(&priv->mutex);
8900 return err;
8901 }
8902
8903 static int ipw_wx_get_mode(struct net_device *dev,
8904 struct iw_request_info *info,
8905 union iwreq_data *wrqu, char *extra)
8906 {
8907 struct ipw_priv *priv = libipw_priv(dev);
8908 mutex_lock(&priv->mutex);
8909 wrqu->mode = priv->ieee->iw_mode;
8910 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8911 mutex_unlock(&priv->mutex);
8912 return 0;
8913 }
8914
8915 /* Values are in microseconds */
8916 static const s32 timeout_duration[] = {
8917 350000,
8918 250000,
8919 75000,
8920 37000,
8921 25000,
8922 };
8923
8924 static const s32 period_duration[] = {
8925 400000,
8926 700000,
8927 1000000,
8928 1000000,
8929 1000000
8930 };
8931
8932 static int ipw_wx_get_range(struct net_device *dev,
8933 struct iw_request_info *info,
8934 union iwreq_data *wrqu, char *extra)
8935 {
8936 struct ipw_priv *priv = libipw_priv(dev);
8937 struct iw_range *range = (struct iw_range *)extra;
8938 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8939 int i = 0, j;
8940
8941 wrqu->data.length = sizeof(*range);
8942 memset(range, 0, sizeof(*range));
8943
8944 	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8945 range->throughput = 27 * 1000 * 1000;
8946
8947 range->max_qual.qual = 100;
8948 /* TODO: Find real max RSSI and stick here */
8949 range->max_qual.level = 0;
8950 range->max_qual.noise = 0;
8951 range->max_qual.updated = 7; /* Updated all three */
8952
8953 range->avg_qual.qual = 70;
8954 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8955 range->avg_qual.level = 0; /* FIXME to real average level */
8956 range->avg_qual.noise = 0;
8957 range->avg_qual.updated = 7; /* Updated all three */
8958 mutex_lock(&priv->mutex);
8959 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8960
8961 for (i = 0; i < range->num_bitrates; i++)
8962 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8963 500000;
8964
8965 range->max_rts = DEFAULT_RTS_THRESHOLD;
8966 range->min_frag = MIN_FRAG_THRESHOLD;
8967 range->max_frag = MAX_FRAG_THRESHOLD;
8968
8969 range->encoding_size[0] = 5;
8970 range->encoding_size[1] = 13;
8971 range->num_encoding_sizes = 2;
8972 range->max_encoding_tokens = WEP_KEYS;
8973
8974 /* Set the Wireless Extension versions */
8975 range->we_version_compiled = WIRELESS_EXT;
8976 range->we_version_source = 18;
8977
8978 i = 0;
8979 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8980 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8981 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8982 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8983 continue;
8984
8985 range->freq[i].i = geo->bg[j].channel;
8986 range->freq[i].m = geo->bg[j].freq * 100000;
8987 range->freq[i].e = 1;
8988 i++;
8989 }
8990 }
8991
8992 if (priv->ieee->mode & IEEE_A) {
8993 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8994 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8995 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8996 continue;
8997
8998 range->freq[i].i = geo->a[j].channel;
8999 range->freq[i].m = geo->a[j].freq * 100000;
9000 range->freq[i].e = 1;
9001 i++;
9002 }
9003 }
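	/* Worked example, for illustration only: Wireless Extensions encode a
	 * frequency as m * 10^e Hz, so 2.4 GHz channel 1 (2412 MHz) is
	 * reported above as m = 2412 * 100000 = 241200000 with e = 1,
	 * i.e. 2.412 GHz. */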
9004
9005 range->num_channels = i;
9006 range->num_frequency = i;
9007
9008 mutex_unlock(&priv->mutex);
9009
9010 /* Event capability (kernel + driver) */
9011 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9012 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9013 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9014 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9015 range->event_capa[1] = IW_EVENT_CAPA_K_1;
9016
9017 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9018 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9019
9020 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9021
9022 IPW_DEBUG_WX("GET Range\n");
9023 return 0;
9024 }
9025
9026 static int ipw_wx_set_wap(struct net_device *dev,
9027 struct iw_request_info *info,
9028 union iwreq_data *wrqu, char *extra)
9029 {
9030 struct ipw_priv *priv = libipw_priv(dev);
9031
9032 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9033 return -EINVAL;
9034 mutex_lock(&priv->mutex);
9035 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
9036 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
9037 /* we disable mandatory BSSID association */
9038 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9039 priv->config &= ~CFG_STATIC_BSSID;
9040 IPW_DEBUG_ASSOC("Attempting to associate with new "
9041 "parameters.\n");
9042 ipw_associate(priv);
9043 mutex_unlock(&priv->mutex);
9044 return 0;
9045 }
9046
9047 priv->config |= CFG_STATIC_BSSID;
9048 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9049 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9050 mutex_unlock(&priv->mutex);
9051 return 0;
9052 }
9053
9054 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9055 wrqu->ap_addr.sa_data);
9056
9057 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9058
9059 /* Network configuration changed -- force [re]association */
9060 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9061 if (!ipw_disassociate(priv))
9062 ipw_associate(priv);
9063
9064 mutex_unlock(&priv->mutex);
9065 return 0;
9066 }
9067
9068 static int ipw_wx_get_wap(struct net_device *dev,
9069 struct iw_request_info *info,
9070 union iwreq_data *wrqu, char *extra)
9071 {
9072 struct ipw_priv *priv = libipw_priv(dev);
9073
9074 /* If we are associated, trying to associate, or have a statically
9075 * configured BSSID then return that; otherwise return ANY */
9076 mutex_lock(&priv->mutex);
9077 if (priv->config & CFG_STATIC_BSSID ||
9078 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9079 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9080 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9081 } else
9082 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9083
9084 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9085 wrqu->ap_addr.sa_data);
9086 mutex_unlock(&priv->mutex);
9087 return 0;
9088 }
9089
9090 static int ipw_wx_set_essid(struct net_device *dev,
9091 struct iw_request_info *info,
9092 union iwreq_data *wrqu, char *extra)
9093 {
9094 struct ipw_priv *priv = libipw_priv(dev);
9095 int length;
9096 DECLARE_SSID_BUF(ssid);
9097
9098 mutex_lock(&priv->mutex);
9099
9100 if (!wrqu->essid.flags)
9101 {
9102 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9103 ipw_disassociate(priv);
9104 priv->config &= ~CFG_STATIC_ESSID;
9105 ipw_associate(priv);
9106 mutex_unlock(&priv->mutex);
9107 return 0;
9108 }
9109
9110 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9111
9112 priv->config |= CFG_STATIC_ESSID;
9113
9114 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9115 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9116 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9117 mutex_unlock(&priv->mutex);
9118 return 0;
9119 }
9120
9121 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9122 print_ssid(ssid, extra, length), length);
9123
9124 priv->essid_len = length;
9125 memcpy(priv->essid, extra, priv->essid_len);
9126
9127 /* Network configuration changed -- force [re]association */
9128 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9129 if (!ipw_disassociate(priv))
9130 ipw_associate(priv);
9131
9132 mutex_unlock(&priv->mutex);
9133 return 0;
9134 }
9135
9136 static int ipw_wx_get_essid(struct net_device *dev,
9137 struct iw_request_info *info,
9138 union iwreq_data *wrqu, char *extra)
9139 {
9140 struct ipw_priv *priv = libipw_priv(dev);
9141 DECLARE_SSID_BUF(ssid);
9142
9143 /* If we are associated, trying to associate, or have a statically
9144 * configured ESSID then return that; otherwise return ANY */
9145 mutex_lock(&priv->mutex);
9146 if (priv->config & CFG_STATIC_ESSID ||
9147 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9148 IPW_DEBUG_WX("Getting essid: '%s'\n",
9149 print_ssid(ssid, priv->essid, priv->essid_len));
9150 memcpy(extra, priv->essid, priv->essid_len);
9151 wrqu->essid.length = priv->essid_len;
9152 wrqu->essid.flags = 1; /* active */
9153 } else {
9154 IPW_DEBUG_WX("Getting essid: ANY\n");
9155 wrqu->essid.length = 0;
9156 		wrqu->essid.flags = 0;	/* any */
9157 }
9158 mutex_unlock(&priv->mutex);
9159 return 0;
9160 }
9161
9162 static int ipw_wx_set_nick(struct net_device *dev,
9163 struct iw_request_info *info,
9164 union iwreq_data *wrqu, char *extra)
9165 {
9166 struct ipw_priv *priv = libipw_priv(dev);
9167
9168 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9169 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9170 return -E2BIG;
9171 mutex_lock(&priv->mutex);
9172 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9173 memset(priv->nick, 0, sizeof(priv->nick));
9174 memcpy(priv->nick, extra, wrqu->data.length);
9175 IPW_DEBUG_TRACE("<<\n");
9176 mutex_unlock(&priv->mutex);
9177 return 0;
9178
9179 }
9180
9181 static int ipw_wx_get_nick(struct net_device *dev,
9182 struct iw_request_info *info,
9183 union iwreq_data *wrqu, char *extra)
9184 {
9185 struct ipw_priv *priv = libipw_priv(dev);
9186 IPW_DEBUG_WX("Getting nick\n");
9187 mutex_lock(&priv->mutex);
9188 wrqu->data.length = strlen(priv->nick);
9189 memcpy(extra, priv->nick, wrqu->data.length);
9190 wrqu->data.flags = 1; /* active */
9191 mutex_unlock(&priv->mutex);
9192 return 0;
9193 }
9194
9195 static int ipw_wx_set_sens(struct net_device *dev,
9196 struct iw_request_info *info,
9197 union iwreq_data *wrqu, char *extra)
9198 {
9199 struct ipw_priv *priv = libipw_priv(dev);
9200 int err = 0;
9201
9202 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9203 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9204 mutex_lock(&priv->mutex);
9205
9206 if (wrqu->sens.fixed == 0)
9207 {
9208 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9209 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9210 goto out;
9211 }
9212 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9213 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9214 err = -EINVAL;
9215 goto out;
9216 }
9217
9218 priv->roaming_threshold = wrqu->sens.value;
9219 priv->disassociate_threshold = 3*wrqu->sens.value;
9220 out:
9221 mutex_unlock(&priv->mutex);
9222 return err;
9223 }
9224
9225 static int ipw_wx_get_sens(struct net_device *dev,
9226 struct iw_request_info *info,
9227 union iwreq_data *wrqu, char *extra)
9228 {
9229 struct ipw_priv *priv = libipw_priv(dev);
9230 mutex_lock(&priv->mutex);
9231 wrqu->sens.fixed = 1;
9232 wrqu->sens.value = priv->roaming_threshold;
9233 mutex_unlock(&priv->mutex);
9234
9235 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9236 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9237
9238 return 0;
9239 }
9240
9241 static int ipw_wx_set_rate(struct net_device *dev,
9242 struct iw_request_info *info,
9243 union iwreq_data *wrqu, char *extra)
9244 {
9245 /* TODO: We should use semaphores or locks for access to priv */
9246 struct ipw_priv *priv = libipw_priv(dev);
9247 u32 target_rate = wrqu->bitrate.value;
9248 u32 fixed, mask;
9249
9250 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9251 /* value = X, fixed = 1 means only rate X */
9252 	/* value = X, fixed = 0 means all rates lower than or equal to X */
9253
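	/* Worked example, for illustration only (assuming a typical WE tool
	 * passes the rate in bit/s): value = 5500000 with fixed = 0 walks the
	 * checks below and builds mask = CCK 1 | 2 | 5.5 Mb/s before jumping
	 * to "apply"; with fixed = 1 only the 5.5 Mb/s mask bit is set. */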
9254 if (target_rate == -1) {
9255 fixed = 0;
9256 mask = LIBIPW_DEFAULT_RATES_MASK;
9257 /* Now we should reassociate */
9258 goto apply;
9259 }
9260
9261 mask = 0;
9262 fixed = wrqu->bitrate.fixed;
9263
9264 if (target_rate == 1000000 || !fixed)
9265 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9266 if (target_rate == 1000000)
9267 goto apply;
9268
9269 if (target_rate == 2000000 || !fixed)
9270 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9271 if (target_rate == 2000000)
9272 goto apply;
9273
9274 if (target_rate == 5500000 || !fixed)
9275 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9276 if (target_rate == 5500000)
9277 goto apply;
9278
9279 if (target_rate == 6000000 || !fixed)
9280 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9281 if (target_rate == 6000000)
9282 goto apply;
9283
9284 if (target_rate == 9000000 || !fixed)
9285 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9286 if (target_rate == 9000000)
9287 goto apply;
9288
9289 if (target_rate == 11000000 || !fixed)
9290 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9291 if (target_rate == 11000000)
9292 goto apply;
9293
9294 if (target_rate == 12000000 || !fixed)
9295 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9296 if (target_rate == 12000000)
9297 goto apply;
9298
9299 if (target_rate == 18000000 || !fixed)
9300 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9301 if (target_rate == 18000000)
9302 goto apply;
9303
9304 if (target_rate == 24000000 || !fixed)
9305 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9306 if (target_rate == 24000000)
9307 goto apply;
9308
9309 if (target_rate == 36000000 || !fixed)
9310 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9311 if (target_rate == 36000000)
9312 goto apply;
9313
9314 if (target_rate == 48000000 || !fixed)
9315 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9316 if (target_rate == 48000000)
9317 goto apply;
9318
9319 if (target_rate == 54000000 || !fixed)
9320 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9321 if (target_rate == 54000000)
9322 goto apply;
9323
9324 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9325 return -EINVAL;
9326
9327 apply:
9328 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9329 mask, fixed ? "fixed" : "sub-rates");
9330 mutex_lock(&priv->mutex);
9331 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9332 priv->config &= ~CFG_FIXED_RATE;
9333 ipw_set_fixed_rate(priv, priv->ieee->mode);
9334 } else
9335 priv->config |= CFG_FIXED_RATE;
9336
9337 if (priv->rates_mask == mask) {
9338 IPW_DEBUG_WX("Mask set to current mask.\n");
9339 mutex_unlock(&priv->mutex);
9340 return 0;
9341 }
9342
9343 priv->rates_mask = mask;
9344
9345 /* Network configuration changed -- force [re]association */
9346 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9347 if (!ipw_disassociate(priv))
9348 ipw_associate(priv);
9349
9350 mutex_unlock(&priv->mutex);
9351 return 0;
9352 }
9353
9354 static int ipw_wx_get_rate(struct net_device *dev,
9355 struct iw_request_info *info,
9356 union iwreq_data *wrqu, char *extra)
9357 {
9358 struct ipw_priv *priv = libipw_priv(dev);
9359 mutex_lock(&priv->mutex);
9360 wrqu->bitrate.value = priv->last_rate;
9361 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9362 mutex_unlock(&priv->mutex);
9363 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9364 return 0;
9365 }
9366
9367 static int ipw_wx_set_rts(struct net_device *dev,
9368 struct iw_request_info *info,
9369 union iwreq_data *wrqu, char *extra)
9370 {
9371 struct ipw_priv *priv = libipw_priv(dev);
9372 mutex_lock(&priv->mutex);
9373 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9374 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9375 else {
9376 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9377 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9378 mutex_unlock(&priv->mutex);
9379 return -EINVAL;
9380 }
9381 priv->rts_threshold = wrqu->rts.value;
9382 }
9383
9384 ipw_send_rts_threshold(priv, priv->rts_threshold);
9385 mutex_unlock(&priv->mutex);
9386 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9387 return 0;
9388 }
9389
9390 static int ipw_wx_get_rts(struct net_device *dev,
9391 struct iw_request_info *info,
9392 union iwreq_data *wrqu, char *extra)
9393 {
9394 struct ipw_priv *priv = libipw_priv(dev);
9395 mutex_lock(&priv->mutex);
9396 wrqu->rts.value = priv->rts_threshold;
9397 wrqu->rts.fixed = 0; /* no auto select */
9398 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9399 mutex_unlock(&priv->mutex);
9400 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9401 return 0;
9402 }
9403
9404 static int ipw_wx_set_txpow(struct net_device *dev,
9405 struct iw_request_info *info,
9406 union iwreq_data *wrqu, char *extra)
9407 {
9408 struct ipw_priv *priv = libipw_priv(dev);
9409 int err = 0;
9410
9411 mutex_lock(&priv->mutex);
9412 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9413 err = -EINPROGRESS;
9414 goto out;
9415 }
9416
9417 if (!wrqu->power.fixed)
9418 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9419
9420 if (wrqu->power.flags != IW_TXPOW_DBM) {
9421 err = -EINVAL;
9422 goto out;
9423 }
9424
9425 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9426 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9427 err = -EINVAL;
9428 goto out;
9429 }
9430
9431 priv->tx_power = wrqu->power.value;
9432 err = ipw_set_tx_power(priv);
9433 out:
9434 mutex_unlock(&priv->mutex);
9435 return err;
9436 }
9437
9438 static int ipw_wx_get_txpow(struct net_device *dev,
9439 struct iw_request_info *info,
9440 union iwreq_data *wrqu, char *extra)
9441 {
9442 struct ipw_priv *priv = libipw_priv(dev);
9443 mutex_lock(&priv->mutex);
9444 wrqu->power.value = priv->tx_power;
9445 wrqu->power.fixed = 1;
9446 wrqu->power.flags = IW_TXPOW_DBM;
9447 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9448 mutex_unlock(&priv->mutex);
9449
9450 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9451 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9452
9453 return 0;
9454 }
9455
9456 static int ipw_wx_set_frag(struct net_device *dev,
9457 struct iw_request_info *info,
9458 union iwreq_data *wrqu, char *extra)
9459 {
9460 struct ipw_priv *priv = libipw_priv(dev);
9461 mutex_lock(&priv->mutex);
9462 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9463 priv->ieee->fts = DEFAULT_FTS;
9464 else {
9465 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9466 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9467 mutex_unlock(&priv->mutex);
9468 return -EINVAL;
9469 }
9470
9471 priv->ieee->fts = wrqu->frag.value & ~0x1;
9472 }
9473
9474 ipw_send_frag_threshold(priv, wrqu->frag.value);
9475 mutex_unlock(&priv->mutex);
9476 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9477 return 0;
9478 }
9479
9480 static int ipw_wx_get_frag(struct net_device *dev,
9481 struct iw_request_info *info,
9482 union iwreq_data *wrqu, char *extra)
9483 {
9484 struct ipw_priv *priv = libipw_priv(dev);
9485 mutex_lock(&priv->mutex);
9486 wrqu->frag.value = priv->ieee->fts;
9487 wrqu->frag.fixed = 0; /* no auto select */
9488 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9489 mutex_unlock(&priv->mutex);
9490 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9491
9492 return 0;
9493 }
9494
9495 static int ipw_wx_set_retry(struct net_device *dev,
9496 struct iw_request_info *info,
9497 union iwreq_data *wrqu, char *extra)
9498 {
9499 struct ipw_priv *priv = libipw_priv(dev);
9500
9501 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9502 return -EINVAL;
9503
9504 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9505 return 0;
9506
9507 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9508 return -EINVAL;
9509
9510 mutex_lock(&priv->mutex);
9511 if (wrqu->retry.flags & IW_RETRY_SHORT)
9512 priv->short_retry_limit = (u8) wrqu->retry.value;
9513 else if (wrqu->retry.flags & IW_RETRY_LONG)
9514 priv->long_retry_limit = (u8) wrqu->retry.value;
9515 else {
9516 priv->short_retry_limit = (u8) wrqu->retry.value;
9517 priv->long_retry_limit = (u8) wrqu->retry.value;
9518 }
9519
9520 ipw_send_retry_limit(priv, priv->short_retry_limit,
9521 priv->long_retry_limit);
9522 mutex_unlock(&priv->mutex);
9523 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9524 priv->short_retry_limit, priv->long_retry_limit);
9525 return 0;
9526 }
9527
9528 static int ipw_wx_get_retry(struct net_device *dev,
9529 struct iw_request_info *info,
9530 union iwreq_data *wrqu, char *extra)
9531 {
9532 struct ipw_priv *priv = libipw_priv(dev);
9533
9534 mutex_lock(&priv->mutex);
9535 wrqu->retry.disabled = 0;
9536
9537 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9538 mutex_unlock(&priv->mutex);
9539 return -EINVAL;
9540 }
9541
9542 if (wrqu->retry.flags & IW_RETRY_LONG) {
9543 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9544 wrqu->retry.value = priv->long_retry_limit;
9545 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9546 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9547 wrqu->retry.value = priv->short_retry_limit;
9548 } else {
9549 wrqu->retry.flags = IW_RETRY_LIMIT;
9550 wrqu->retry.value = priv->short_retry_limit;
9551 }
9552 mutex_unlock(&priv->mutex);
9553
9554 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9555
9556 return 0;
9557 }
9558
9559 static int ipw_wx_set_scan(struct net_device *dev,
9560 struct iw_request_info *info,
9561 union iwreq_data *wrqu, char *extra)
9562 {
9563 struct ipw_priv *priv = libipw_priv(dev);
9564 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9565 struct delayed_work *work = NULL;
9566
9567 mutex_lock(&priv->mutex);
9568
9569 priv->user_requested_scan = 1;
9570
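/* Descriptive note (added): a full iw_scan_req can ask for a directed scan
 * of a specific ESSID or for a passive scan; a plain SIOCSIWSCAN request
 * falls back to an active broadcast scan. */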
9571 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9572 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9573 int len = min((int)req->essid_len,
9574 (int)sizeof(priv->direct_scan_ssid));
9575 memcpy(priv->direct_scan_ssid, req->essid, len);
9576 priv->direct_scan_ssid_len = len;
9577 work = &priv->request_direct_scan;
9578 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9579 work = &priv->request_passive_scan;
9580 }
9581 } else {
9582 /* Normal active broadcast scan */
9583 work = &priv->request_scan;
9584 }
9585
9586 mutex_unlock(&priv->mutex);
9587
9588 IPW_DEBUG_WX("Start scan\n");
9589
9590 schedule_delayed_work(work, 0);
9591
9592 return 0;
9593 }
9594
9595 static int ipw_wx_get_scan(struct net_device *dev,
9596 struct iw_request_info *info,
9597 union iwreq_data *wrqu, char *extra)
9598 {
9599 struct ipw_priv *priv = libipw_priv(dev);
9600 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9601 }
9602
9603 static int ipw_wx_set_encode(struct net_device *dev,
9604 struct iw_request_info *info,
9605 union iwreq_data *wrqu, char *key)
9606 {
9607 struct ipw_priv *priv = libipw_priv(dev);
9608 int ret;
9609 u32 cap = priv->capability;
9610
9611 mutex_lock(&priv->mutex);
9612 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9613
9614 /* In IBSS mode, we need to notify the firmware to update
9615 * the beacon info after we changed the capability. */
9616 if (cap != priv->capability &&
9617 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9618 priv->status & STATUS_ASSOCIATED)
9619 ipw_disassociate(priv);
9620
9621 mutex_unlock(&priv->mutex);
9622 return ret;
9623 }
9624
9625 static int ipw_wx_get_encode(struct net_device *dev,
9626 struct iw_request_info *info,
9627 union iwreq_data *wrqu, char *key)
9628 {
9629 struct ipw_priv *priv = libipw_priv(dev);
9630 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9631 }
9632
9633 static int ipw_wx_set_power(struct net_device *dev,
9634 struct iw_request_info *info,
9635 union iwreq_data *wrqu, char *extra)
9636 {
9637 struct ipw_priv *priv = libipw_priv(dev);
9638 int err;
9639 mutex_lock(&priv->mutex);
9640 if (wrqu->power.disabled) {
9641 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9642 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9643 if (err) {
9644 IPW_DEBUG_WX("failed setting power mode.\n");
9645 mutex_unlock(&priv->mutex);
9646 return err;
9647 }
9648 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9649 mutex_unlock(&priv->mutex);
9650 return 0;
9651 }
9652
9653 switch (wrqu->power.flags & IW_POWER_MODE) {
9654 case IW_POWER_ON: /* If not specified */
9655 case IW_POWER_MODE: /* If set all mask */
9656 	case IW_POWER_ALL_R:	/* If explicitly stated as all */
9657 break;
9658 default: /* Otherwise we don't support it */
9659 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9660 wrqu->power.flags);
9661 mutex_unlock(&priv->mutex);
9662 return -EOPNOTSUPP;
9663 }
9664
9665 /* If the user hasn't specified a power management mode yet, default
9666 * to BATTERY */
9667 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9668 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9669 else
9670 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9671
9672 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9673 if (err) {
9674 IPW_DEBUG_WX("failed setting power mode.\n");
9675 mutex_unlock(&priv->mutex);
9676 return err;
9677 }
9678
9679 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9680 mutex_unlock(&priv->mutex);
9681 return 0;
9682 }
9683
9684 static int ipw_wx_get_power(struct net_device *dev,
9685 struct iw_request_info *info,
9686 union iwreq_data *wrqu, char *extra)
9687 {
9688 struct ipw_priv *priv = libipw_priv(dev);
9689 mutex_lock(&priv->mutex);
9690 if (!(priv->power_mode & IPW_POWER_ENABLED))
9691 wrqu->power.disabled = 1;
9692 else
9693 wrqu->power.disabled = 0;
9694
9695 mutex_unlock(&priv->mutex);
9696 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9697
9698 return 0;
9699 }
9700
9701 static int ipw_wx_set_powermode(struct net_device *dev,
9702 struct iw_request_info *info,
9703 union iwreq_data *wrqu, char *extra)
9704 {
9705 struct ipw_priv *priv = libipw_priv(dev);
9706 int mode = *(int *)extra;
9707 int err;
9708
9709 mutex_lock(&priv->mutex);
9710 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9711 mode = IPW_POWER_AC;
9712
9713 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9714 err = ipw_send_power_mode(priv, mode);
9715 if (err) {
9716 IPW_DEBUG_WX("failed setting power mode.\n");
9717 mutex_unlock(&priv->mutex);
9718 return err;
9719 }
9720 priv->power_mode = IPW_POWER_ENABLED | mode;
9721 }
9722 mutex_unlock(&priv->mutex);
9723 return 0;
9724 }
9725
9726 #define MAX_WX_STRING 80
9727 static int ipw_wx_get_powermode(struct net_device *dev,
9728 struct iw_request_info *info,
9729 union iwreq_data *wrqu, char *extra)
9730 {
9731 struct ipw_priv *priv = libipw_priv(dev);
9732 int level = IPW_POWER_LEVEL(priv->power_mode);
9733 char *p = extra;
9734
9735 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9736
9737 switch (level) {
9738 case IPW_POWER_AC:
9739 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9740 break;
9741 case IPW_POWER_BATTERY:
9742 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9743 break;
9744 default:
9745 p += snprintf(p, MAX_WX_STRING - (p - extra),
9746 "(Timeout %dms, Period %dms)",
9747 timeout_duration[level - 1] / 1000,
9748 period_duration[level - 1] / 1000);
9749 }
9750
9751 if (!(priv->power_mode & IPW_POWER_ENABLED))
9752 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9753
9754 wrqu->data.length = p - extra + 1;
9755
9756 return 0;
9757 }
9758
9759 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9760 struct iw_request_info *info,
9761 union iwreq_data *wrqu, char *extra)
9762 {
9763 struct ipw_priv *priv = libipw_priv(dev);
9764 int mode = *(int *)extra;
9765 u8 band = 0, modulation = 0;
9766
9767 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9768 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9769 return -EINVAL;
9770 }
9771 mutex_lock(&priv->mutex);
9772 if (priv->adapter == IPW_2915ABG) {
9773 priv->ieee->abg_true = 1;
9774 if (mode & IEEE_A) {
9775 band |= LIBIPW_52GHZ_BAND;
9776 modulation |= LIBIPW_OFDM_MODULATION;
9777 } else
9778 priv->ieee->abg_true = 0;
9779 } else {
9780 if (mode & IEEE_A) {
9781 IPW_WARNING("Attempt to set 2200BG into "
9782 "802.11a mode\n");
9783 mutex_unlock(&priv->mutex);
9784 return -EINVAL;
9785 }
9786
9787 priv->ieee->abg_true = 0;
9788 }
9789
9790 if (mode & IEEE_B) {
9791 band |= LIBIPW_24GHZ_BAND;
9792 modulation |= LIBIPW_CCK_MODULATION;
9793 } else
9794 priv->ieee->abg_true = 0;
9795
9796 if (mode & IEEE_G) {
9797 band |= LIBIPW_24GHZ_BAND;
9798 modulation |= LIBIPW_OFDM_MODULATION;
9799 } else
9800 priv->ieee->abg_true = 0;
9801
9802 priv->ieee->mode = mode;
9803 priv->ieee->freq_band = band;
9804 priv->ieee->modulation = modulation;
9805 init_supported_rates(priv, &priv->rates);
9806
9807 /* Network configuration changed -- force [re]association */
9808 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9809 if (!ipw_disassociate(priv)) {
9810 ipw_send_supported_rates(priv, &priv->rates);
9811 ipw_associate(priv);
9812 }
9813
9814 /* Update the band LEDs */
9815 ipw_led_band_on(priv);
9816
9817 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9818 mode & IEEE_A ? 'a' : '.',
9819 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9820 mutex_unlock(&priv->mutex);
9821 return 0;
9822 }
9823
9824 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9825 struct iw_request_info *info,
9826 union iwreq_data *wrqu, char *extra)
9827 {
9828 struct ipw_priv *priv = libipw_priv(dev);
9829 mutex_lock(&priv->mutex);
9830 switch (priv->ieee->mode) {
9831 case IEEE_A:
9832 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9833 break;
9834 case IEEE_B:
9835 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9836 break;
9837 case IEEE_A | IEEE_B:
9838 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9839 break;
9840 case IEEE_G:
9841 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9842 break;
9843 case IEEE_A | IEEE_G:
9844 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9845 break;
9846 case IEEE_B | IEEE_G:
9847 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9848 break;
9849 case IEEE_A | IEEE_B | IEEE_G:
9850 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9851 break;
9852 default:
9853 strncpy(extra, "unknown", MAX_WX_STRING);
9854 break;
9855 }
9856
9857 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9858
9859 wrqu->data.length = strlen(extra) + 1;
9860 mutex_unlock(&priv->mutex);
9861
9862 return 0;
9863 }
9864
9865 static int ipw_wx_set_preamble(struct net_device *dev,
9866 struct iw_request_info *info,
9867 union iwreq_data *wrqu, char *extra)
9868 {
9869 struct ipw_priv *priv = libipw_priv(dev);
9870 int mode = *(int *)extra;
9871 mutex_lock(&priv->mutex);
9872 /* Switching from SHORT -> LONG requires a disassociation */
9873 if (mode == 1) {
9874 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9875 priv->config |= CFG_PREAMBLE_LONG;
9876
9877 /* Network configuration changed -- force [re]association */
9878 IPW_DEBUG_ASSOC
9879 ("[re]association triggered due to preamble change.\n");
9880 if (!ipw_disassociate(priv))
9881 ipw_associate(priv);
9882 }
9883 goto done;
9884 }
9885
9886 if (mode == 0) {
9887 priv->config &= ~CFG_PREAMBLE_LONG;
9888 goto done;
9889 }
9890 mutex_unlock(&priv->mutex);
9891 return -EINVAL;
9892
9893 done:
9894 mutex_unlock(&priv->mutex);
9895 return 0;
9896 }
9897
9898 static int ipw_wx_get_preamble(struct net_device *dev,
9899 struct iw_request_info *info,
9900 union iwreq_data *wrqu, char *extra)
9901 {
9902 struct ipw_priv *priv = libipw_priv(dev);
9903 mutex_lock(&priv->mutex);
9904 if (priv->config & CFG_PREAMBLE_LONG)
9905 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9906 else
9907 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9908 mutex_unlock(&priv->mutex);
9909 return 0;
9910 }
9911
9912 #ifdef CONFIG_IPW2200_MONITOR
9913 static int ipw_wx_set_monitor(struct net_device *dev,
9914 struct iw_request_info *info,
9915 union iwreq_data *wrqu, char *extra)
9916 {
9917 struct ipw_priv *priv = libipw_priv(dev);
9918 int *parms = (int *)extra;
9919 int enable = (parms[0] > 0);
9920 mutex_lock(&priv->mutex);
9921 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9922 if (enable) {
9923 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9924 #ifdef CONFIG_IPW2200_RADIOTAP
9925 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9926 #else
9927 priv->net_dev->type = ARPHRD_IEEE80211;
9928 #endif
9929 schedule_work(&priv->adapter_restart);
9930 }
9931
9932 ipw_set_channel(priv, parms[1]);
9933 } else {
9934 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9935 mutex_unlock(&priv->mutex);
9936 return 0;
9937 }
9938 priv->net_dev->type = ARPHRD_ETHER;
9939 schedule_work(&priv->adapter_restart);
9940 }
9941 mutex_unlock(&priv->mutex);
9942 return 0;
9943 }
9944
9945 #endif /* CONFIG_IPW2200_MONITOR */
9946
9947 static int ipw_wx_reset(struct net_device *dev,
9948 struct iw_request_info *info,
9949 union iwreq_data *wrqu, char *extra)
9950 {
9951 struct ipw_priv *priv = libipw_priv(dev);
9952 IPW_DEBUG_WX("RESET\n");
9953 schedule_work(&priv->adapter_restart);
9954 return 0;
9955 }
9956
9957 static int ipw_wx_sw_reset(struct net_device *dev,
9958 struct iw_request_info *info,
9959 union iwreq_data *wrqu, char *extra)
9960 {
9961 struct ipw_priv *priv = libipw_priv(dev);
9962 union iwreq_data wrqu_sec = {
9963 .encoding = {
9964 .flags = IW_ENCODE_DISABLED,
9965 },
9966 };
9967 int ret;
9968
9969 IPW_DEBUG_WX("SW_RESET\n");
9970
9971 mutex_lock(&priv->mutex);
9972
9973 ret = ipw_sw_reset(priv, 2);
9974 if (!ret) {
9975 free_firmware();
9976 ipw_adapter_restart(priv);
9977 }
9978
9979 /* The SW reset bit might have been toggled on by the 'disable'
9980 * module parameter, so take appropriate action */
9981 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9982
9983 mutex_unlock(&priv->mutex);
9984 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9985 mutex_lock(&priv->mutex);
9986
9987 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9988 /* Configuration likely changed -- force [re]association */
9989 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9990 "reset.\n");
9991 if (!ipw_disassociate(priv))
9992 ipw_associate(priv);
9993 }
9994
9995 mutex_unlock(&priv->mutex);
9996
9997 return 0;
9998 }
9999
10000 /* Rebase the WE IOCTLs to zero for the handler array */
10001 static iw_handler ipw_wx_handlers[] = {
10002 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10003 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10004 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10005 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10006 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10007 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10008 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10009 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10010 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10011 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10012 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10013 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10014 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10015 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10016 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10017 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10018 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10019 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10020 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10021 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10022 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10023 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10024 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10025 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10026 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10027 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10028 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10029 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10030 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10031 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10032 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10033 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10034 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10035 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10036 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10037 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10038 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10039 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10040 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10041 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10042 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10043 };
10044
10045 enum {
10046 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10047 IPW_PRIV_GET_POWER,
10048 IPW_PRIV_SET_MODE,
10049 IPW_PRIV_GET_MODE,
10050 IPW_PRIV_SET_PREAMBLE,
10051 IPW_PRIV_GET_PREAMBLE,
10052 IPW_PRIV_RESET,
10053 IPW_PRIV_SW_RESET,
10054 #ifdef CONFIG_IPW2200_MONITOR
10055 IPW_PRIV_SET_MONITOR,
10056 #endif
10057 };
10058
10059 static struct iw_priv_args ipw_priv_args[] = {
10060 {
10061 .cmd = IPW_PRIV_SET_POWER,
10062 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10063 .name = "set_power"},
10064 {
10065 .cmd = IPW_PRIV_GET_POWER,
10066 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10067 .name = "get_power"},
10068 {
10069 .cmd = IPW_PRIV_SET_MODE,
10070 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10071 .name = "set_mode"},
10072 {
10073 .cmd = IPW_PRIV_GET_MODE,
10074 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10075 .name = "get_mode"},
10076 {
10077 .cmd = IPW_PRIV_SET_PREAMBLE,
10078 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10079 .name = "set_preamble"},
10080 {
10081 .cmd = IPW_PRIV_GET_PREAMBLE,
10082 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10083 .name = "get_preamble"},
10084 {
10085 IPW_PRIV_RESET,
10086 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10087 {
10088 IPW_PRIV_SW_RESET,
10089 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10090 #ifdef CONFIG_IPW2200_MONITOR
10091 {
10092 IPW_PRIV_SET_MONITOR,
10093 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10094 #endif /* CONFIG_IPW2200_MONITOR */
10095 };
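/* Usage sketch (added, illustrative only -- interface name hypothetical):
 * these private ioctls are reachable from user space through iwpriv, e.g.
 *   iwpriv eth1 set_power 1
 *   iwpriv eth1 monitor 1 6    (enable monitor mode on channel 6,
 *                               CONFIG_IPW2200_MONITOR builds only)
 */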
10096
10097 static iw_handler ipw_priv_handler[] = {
10098 ipw_wx_set_powermode,
10099 ipw_wx_get_powermode,
10100 ipw_wx_set_wireless_mode,
10101 ipw_wx_get_wireless_mode,
10102 ipw_wx_set_preamble,
10103 ipw_wx_get_preamble,
10104 ipw_wx_reset,
10105 ipw_wx_sw_reset,
10106 #ifdef CONFIG_IPW2200_MONITOR
10107 ipw_wx_set_monitor,
10108 #endif
10109 };
10110
10111 static struct iw_handler_def ipw_wx_handler_def = {
10112 .standard = ipw_wx_handlers,
10113 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10114 .num_private = ARRAY_SIZE(ipw_priv_handler),
10115 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10116 .private = ipw_priv_handler,
10117 .private_args = ipw_priv_args,
10118 .get_wireless_stats = ipw_get_wireless_stats,
10119 };
10120
10121 /*
10122 * Get wireless statistics.
10123 * Called by /proc/net/wireless
10124 * Also called by SIOCGIWSTATS
10125 */
10126 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10127 {
10128 struct ipw_priv *priv = libipw_priv(dev);
10129 struct iw_statistics *wstats;
10130
10131 wstats = &priv->wstats;
10132
10133 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10134 * netdev->get_wireless_stats seems to be called before fw is
10135 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10136 	 * and associated; if not associated, the values are all meaningless
10137 * anyway, so set them all to NULL and INVALID */
10138 if (!(priv->status & STATUS_ASSOCIATED)) {
10139 wstats->miss.beacon = 0;
10140 wstats->discard.retries = 0;
10141 wstats->qual.qual = 0;
10142 wstats->qual.level = 0;
10143 wstats->qual.noise = 0;
10144 wstats->qual.updated = 7;
10145 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10146 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10147 return wstats;
10148 }
10149
10150 wstats->qual.qual = priv->quality;
10151 wstats->qual.level = priv->exp_avg_rssi;
10152 wstats->qual.noise = priv->exp_avg_noise;
10153 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10154 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10155
10156 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10157 wstats->discard.retries = priv->last_tx_failures;
10158 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10159
10160 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10161 goto fail_get_ordinal;
10162 wstats->discard.retries += tx_retry; */
10163
10164 return wstats;
10165 }
10166
10167 /* net device stuff */
10168
10169 static void init_sys_config(struct ipw_sys_config *sys_config)
10170 {
10171 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10172 sys_config->bt_coexistence = 0;
10173 sys_config->answer_broadcast_ssid_probe = 0;
10174 sys_config->accept_all_data_frames = 0;
10175 sys_config->accept_non_directed_frames = 1;
10176 sys_config->exclude_unicast_unencrypted = 0;
10177 sys_config->disable_unicast_decryption = 1;
10178 sys_config->exclude_multicast_unencrypted = 0;
10179 sys_config->disable_multicast_decryption = 1;
10180 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10181 antenna = CFG_SYS_ANTENNA_BOTH;
10182 sys_config->antenna_diversity = antenna;
10183 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10184 sys_config->dot11g_auto_detection = 0;
10185 sys_config->enable_cts_to_self = 0;
10186 sys_config->bt_coexist_collision_thr = 0;
10187 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10188 sys_config->silence_threshold = 0x1e;
10189 }
10190
10191 static int ipw_net_open(struct net_device *dev)
10192 {
10193 IPW_DEBUG_INFO("dev->open\n");
10194 netif_start_queue(dev);
10195 return 0;
10196 }
10197
10198 static int ipw_net_stop(struct net_device *dev)
10199 {
10200 IPW_DEBUG_INFO("dev->close\n");
10201 netif_stop_queue(dev);
10202 return 0;
10203 }
10204
10205 /*
10206 TODO:
10207
10208 Modify to send one TFD per fragment instead of using chunking; otherwise
10209 we need to heavily modify libipw_skb_to_txb.
10210 */
10211
10212 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10213 int pri)
10214 {
10215 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10216 txb->fragments[0]->data;
10217 int i = 0;
10218 struct tfd_frame *tfd;
10219 #ifdef CONFIG_IPW2200_QOS
10220 int tx_id = ipw_get_tx_queue_number(priv, pri);
10221 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10222 #else
10223 struct clx2_tx_queue *txq = &priv->txq[0];
10224 #endif
10225 struct clx2_queue *q = &txq->q;
10226 u8 id, hdr_len, unicast;
10227 int fc;
10228
10229 if (!(priv->status & STATUS_ASSOCIATED))
10230 goto drop;
10231
10232 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10233 switch (priv->ieee->iw_mode) {
10234 case IW_MODE_ADHOC:
10235 unicast = !is_multicast_ether_addr(hdr->addr1);
10236 id = ipw_find_station(priv, hdr->addr1);
10237 if (id == IPW_INVALID_STATION) {
10238 id = ipw_add_station(priv, hdr->addr1);
10239 if (id == IPW_INVALID_STATION) {
10240 IPW_WARNING("Attempt to send data to "
10241 "invalid cell: %pM\n",
10242 hdr->addr1);
10243 goto drop;
10244 }
10245 }
10246 break;
10247
10248 case IW_MODE_INFRA:
10249 default:
10250 unicast = !is_multicast_ether_addr(hdr->addr3);
10251 id = 0;
10252 break;
10253 }
10254
10255 tfd = &txq->bd[q->first_empty];
10256 txq->txb[q->first_empty] = txb;
10257 memset(tfd, 0, sizeof(*tfd));
10258 tfd->u.data.station_number = id;
10259
10260 tfd->control_flags.message_type = TX_FRAME_TYPE;
10261 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10262
10263 tfd->u.data.cmd_id = DINO_CMD_TX;
10264 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10265
10266 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10267 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10268 else
10269 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10270
10271 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10272 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10273
10274 fc = le16_to_cpu(hdr->frame_ctl);
10275 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10276
10277 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10278
10279 if (likely(unicast))
10280 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10281
10282 if (txb->encrypted && !priv->ieee->host_encrypt) {
10283 switch (priv->ieee->sec.level) {
10284 case SEC_LEVEL_3:
10285 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10286 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10287 /* XXX: ACK flag must be set for CCMP even if it
10288 * is a multicast/broadcast packet, because CCMP
10289 * group communication encrypted by GTK is
10290 * actually done by the AP. */
10291 if (!unicast)
10292 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10293
10294 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10295 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10296 tfd->u.data.key_index = 0;
10297 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10298 break;
10299 case SEC_LEVEL_2:
10300 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10301 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10302 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10303 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10304 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10305 break;
10306 case SEC_LEVEL_1:
10307 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10308 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10309 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10310 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10311 40)
10312 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10313 else
10314 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10315 break;
10316 case SEC_LEVEL_0:
10317 break;
10318 default:
10319 printk(KERN_ERR "Unknown security level %d\n",
10320 priv->ieee->sec.level);
10321 break;
10322 }
10323 } else
10324 /* No hardware encryption */
10325 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10326
10327 #ifdef CONFIG_IPW2200_QOS
10328 if (fc & IEEE80211_STYPE_QOS_DATA)
10329 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10330 #endif /* CONFIG_IPW2200_QOS */
10331
10332 /* payload */
10333 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10334 txb->nr_frags));
10335 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10336 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10337 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10338 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10339 i, le32_to_cpu(tfd->u.data.num_chunks),
10340 txb->fragments[i]->len - hdr_len);
10341 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10342 i, tfd->u.data.num_chunks,
10343 txb->fragments[i]->len - hdr_len);
10344 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10345 txb->fragments[i]->len - hdr_len);
10346
10347 tfd->u.data.chunk_ptr[i] =
10348 cpu_to_le32(pci_map_single
10349 (priv->pci_dev,
10350 txb->fragments[i]->data + hdr_len,
10351 txb->fragments[i]->len - hdr_len,
10352 PCI_DMA_TODEVICE));
10353 tfd->u.data.chunk_len[i] =
10354 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10355 }
10356
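	/* Descriptive note (added): more fragments than TFD chunk slots --
	 * merge the leftover fragments into one freshly allocated skb and
	 * attach it as a single extra chunk. */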
10357 if (i != txb->nr_frags) {
10358 struct sk_buff *skb;
10359 u16 remaining_bytes = 0;
10360 int j;
10361
10362 for (j = i; j < txb->nr_frags; j++)
10363 remaining_bytes += txb->fragments[j]->len - hdr_len;
10364
10365 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10366 remaining_bytes);
10367 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10368 if (skb != NULL) {
10369 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10370 for (j = i; j < txb->nr_frags; j++) {
10371 int size = txb->fragments[j]->len - hdr_len;
10372
10373 printk(KERN_INFO "Adding frag %d %d...\n",
10374 j, size);
10375 memcpy(skb_put(skb, size),
10376 txb->fragments[j]->data + hdr_len, size);
10377 }
10378 dev_kfree_skb_any(txb->fragments[i]);
10379 txb->fragments[i] = skb;
10380 tfd->u.data.chunk_ptr[i] =
10381 cpu_to_le32(pci_map_single
10382 (priv->pci_dev, skb->data,
10383 remaining_bytes,
10384 PCI_DMA_TODEVICE));
10385
10386 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10387 }
10388 }
10389
10390 /* kick DMA */
10391 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10392 ipw_write32(priv, q->reg_w, q->first_empty);
10393
10394 if (ipw_tx_queue_space(q) < q->high_mark)
10395 netif_stop_queue(priv->net_dev);
10396
10397 return NETDEV_TX_OK;
10398
10399 drop:
10400 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10401 libipw_txb_free(txb);
10402 return NETDEV_TX_OK;
10403 }
10404
10405 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10406 {
10407 struct ipw_priv *priv = libipw_priv(dev);
10408 #ifdef CONFIG_IPW2200_QOS
10409 int tx_id = ipw_get_tx_queue_number(priv, pri);
10410 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10411 #else
10412 struct clx2_tx_queue *txq = &priv->txq[0];
10413 #endif /* CONFIG_IPW2200_QOS */
10414
10415 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10416 return 1;
10417
10418 return 0;
10419 }
10420
10421 #ifdef CONFIG_IPW2200_PROMISCUOUS
10422 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10423 struct libipw_txb *txb)
10424 {
10425 struct libipw_rx_stats dummystats;
10426 struct ieee80211_hdr *hdr;
10427 u8 n;
10428 u16 filter = priv->prom_priv->filter;
10429 int hdr_only = 0;
10430
10431 if (filter & IPW_PROM_NO_TX)
10432 return;
10433
10434 memset(&dummystats, 0, sizeof(dummystats));
10435
10436 /* Filtering of fragment chains is done against the first fragment */
10437 hdr = (void *)txb->fragments[0]->data;
10438 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10439 if (filter & IPW_PROM_NO_MGMT)
10440 return;
10441 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10442 hdr_only = 1;
10443 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10444 if (filter & IPW_PROM_NO_CTL)
10445 return;
10446 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10447 hdr_only = 1;
10448 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10449 if (filter & IPW_PROM_NO_DATA)
10450 return;
10451 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10452 hdr_only = 1;
10453 }
10454
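	/* Descriptive note (added): clone each fragment (or just its 802.11
	 * header, depending on the filter) into a new skb, prepend a minimal
	 * radiotap header carrying only the channel, and hand it to the rtap
	 * interface via libipw_rx(). */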
10455 for(n=0; n<txb->nr_frags; ++n) {
10456 struct sk_buff *src = txb->fragments[n];
10457 struct sk_buff *dst;
10458 struct ieee80211_radiotap_header *rt_hdr;
10459 int len;
10460
10461 if (hdr_only) {
10462 hdr = (void *)src->data;
10463 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10464 } else
10465 len = src->len;
10466
10467 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10468 if (!dst)
10469 continue;
10470
10471 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10472
10473 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10474 rt_hdr->it_pad = 0;
10475 rt_hdr->it_present = 0; /* after all, it's just an idea */
10476 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10477
10478 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10479 ieee80211chan2mhz(priv->channel));
10480 if (priv->channel > 14) /* 802.11a */
10481 *(__le16*)skb_put(dst, sizeof(u16)) =
10482 cpu_to_le16(IEEE80211_CHAN_OFDM |
10483 IEEE80211_CHAN_5GHZ);
10484 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10485 *(__le16*)skb_put(dst, sizeof(u16)) =
10486 cpu_to_le16(IEEE80211_CHAN_CCK |
10487 IEEE80211_CHAN_2GHZ);
10488 else /* 802.11g */
10489 *(__le16*)skb_put(dst, sizeof(u16)) =
10490 cpu_to_le16(IEEE80211_CHAN_OFDM |
10491 IEEE80211_CHAN_2GHZ);
10492
10493 rt_hdr->it_len = cpu_to_le16(dst->len);
10494
10495 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10496
10497 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10498 dev_kfree_skb_any(dst);
10499 }
10500 }
10501 #endif
10502
10503 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10504 struct net_device *dev, int pri)
10505 {
10506 struct ipw_priv *priv = libipw_priv(dev);
10507 unsigned long flags;
10508 netdev_tx_t ret;
10509
10510 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10511 spin_lock_irqsave(&priv->lock, flags);
10512
10513 #ifdef CONFIG_IPW2200_PROMISCUOUS
10514 if (rtap_iface && netif_running(priv->prom_net_dev))
10515 ipw_handle_promiscuous_tx(priv, txb);
10516 #endif
10517
10518 ret = ipw_tx_skb(priv, txb, pri);
10519 if (ret == NETDEV_TX_OK)
10520 __ipw_led_activity_on(priv);
10521 spin_unlock_irqrestore(&priv->lock, flags);
10522
10523 return ret;
10524 }
10525
10526 static void ipw_net_set_multicast_list(struct net_device *dev)
10527 {
10528
10529 }
10530
10531 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10532 {
10533 struct ipw_priv *priv = libipw_priv(dev);
10534 struct sockaddr *addr = p;
10535
10536 if (!is_valid_ether_addr(addr->sa_data))
10537 return -EADDRNOTAVAIL;
10538 mutex_lock(&priv->mutex);
10539 priv->config |= CFG_CUSTOM_MAC;
10540 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10541 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10542 priv->net_dev->name, priv->mac_addr);
10543 schedule_work(&priv->adapter_restart);
10544 mutex_unlock(&priv->mutex);
10545 return 0;
10546 }
10547
10548 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10549 struct ethtool_drvinfo *info)
10550 {
10551 struct ipw_priv *p = libipw_priv(dev);
10552 char vers[64];
10553 char date[32];
10554 u32 len;
10555
10556 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10557 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10558
10559 len = sizeof(vers);
10560 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10561 len = sizeof(date);
10562 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10563
10564 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10565 vers, date);
10566 strlcpy(info->bus_info, pci_name(p->pci_dev),
10567 sizeof(info->bus_info));
10568 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10569 }
10570
10571 static u32 ipw_ethtool_get_link(struct net_device *dev)
10572 {
10573 struct ipw_priv *priv = libipw_priv(dev);
10574 return (priv->status & STATUS_ASSOCIATED) != 0;
10575 }
10576
10577 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10578 {
10579 return IPW_EEPROM_IMAGE_SIZE;
10580 }
10581
10582 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10583 struct ethtool_eeprom *eeprom, u8 * bytes)
10584 {
10585 struct ipw_priv *p = libipw_priv(dev);
10586
10587 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10588 return -EINVAL;
10589 mutex_lock(&p->mutex);
10590 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10591 mutex_unlock(&p->mutex);
10592 return 0;
10593 }
10594
10595 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10596 struct ethtool_eeprom *eeprom, u8 * bytes)
10597 {
10598 struct ipw_priv *p = libipw_priv(dev);
10599 int i;
10600
10601 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10602 return -EINVAL;
10603 mutex_lock(&p->mutex);
10604 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
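	/* Descriptive note (added): the shadow copy is patched above, then the
	 * complete image is written back into the adapter's EEPROM data area. */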
10605 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10606 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10607 mutex_unlock(&p->mutex);
10608 return 0;
10609 }
10610
10611 static const struct ethtool_ops ipw_ethtool_ops = {
10612 .get_link = ipw_ethtool_get_link,
10613 .get_drvinfo = ipw_ethtool_get_drvinfo,
10614 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10615 .get_eeprom = ipw_ethtool_get_eeprom,
10616 .set_eeprom = ipw_ethtool_set_eeprom,
10617 };
10618
10619 static irqreturn_t ipw_isr(int irq, void *data)
10620 {
10621 struct ipw_priv *priv = data;
10622 u32 inta, inta_mask;
10623
10624 if (!priv)
10625 return IRQ_NONE;
10626
10627 spin_lock(&priv->irq_lock);
10628
10629 if (!(priv->status & STATUS_INT_ENABLED)) {
10630 /* IRQ is disabled */
10631 goto none;
10632 }
10633
10634 inta = ipw_read32(priv, IPW_INTA_RW);
10635 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10636
10637 if (inta == 0xFFFFFFFF) {
10638 /* Hardware disappeared */
10639 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10640 goto none;
10641 }
10642
10643 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10644 /* Shared interrupt */
10645 goto none;
10646 }
10647
10648 /* tell the device to stop sending interrupts */
10649 __ipw_disable_interrupts(priv);
10650
10651 /* ack current interrupts */
10652 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10653 ipw_write32(priv, IPW_INTA_RW, inta);
10654
10655 /* Cache INTA value for our tasklet */
10656 priv->isr_inta = inta;
10657
10658 tasklet_schedule(&priv->irq_tasklet);
10659
10660 spin_unlock(&priv->irq_lock);
10661
10662 return IRQ_HANDLED;
10663 none:
10664 spin_unlock(&priv->irq_lock);
10665 return IRQ_NONE;
10666 }
10667
10668 static void ipw_rf_kill(void *adapter)
10669 {
10670 struct ipw_priv *priv = adapter;
10671 unsigned long flags;
10672
10673 spin_lock_irqsave(&priv->lock, flags);
10674
10675 if (rf_kill_active(priv)) {
10676 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10677 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10678 goto exit_unlock;
10679 }
10680
10681 /* RF Kill is now disabled, so bring the device back up */
10682
10683 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10684 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10685 "device\n");
10686
10687 		/* we cannot do an adapter restart while inside an irq lock */
10688 schedule_work(&priv->adapter_restart);
10689 } else
10690 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10691 "enabled\n");
10692
10693 exit_unlock:
10694 spin_unlock_irqrestore(&priv->lock, flags);
10695 }
10696
10697 static void ipw_bg_rf_kill(struct work_struct *work)
10698 {
10699 struct ipw_priv *priv =
10700 container_of(work, struct ipw_priv, rf_kill.work);
10701 mutex_lock(&priv->mutex);
10702 ipw_rf_kill(priv);
10703 mutex_unlock(&priv->mutex);
10704 }
10705
10706 static void ipw_link_up(struct ipw_priv *priv)
10707 {
10708 priv->last_seq_num = -1;
10709 priv->last_frag_num = -1;
10710 priv->last_packet_time = 0;
10711
10712 netif_carrier_on(priv->net_dev);
10713
10714 cancel_delayed_work(&priv->request_scan);
10715 cancel_delayed_work(&priv->request_direct_scan);
10716 cancel_delayed_work(&priv->request_passive_scan);
10717 cancel_delayed_work(&priv->scan_event);
10718 ipw_reset_stats(priv);
10719 /* Ensure the rate is updated immediately */
10720 priv->last_rate = ipw_get_current_rate(priv);
10721 ipw_gather_stats(priv);
10722 ipw_led_link_up(priv);
10723 notify_wx_assoc_event(priv);
10724
10725 if (priv->config & CFG_BACKGROUND_SCAN)
10726 schedule_delayed_work(&priv->request_scan, HZ);
10727 }
10728
10729 static void ipw_bg_link_up(struct work_struct *work)
10730 {
10731 struct ipw_priv *priv =
10732 container_of(work, struct ipw_priv, link_up);
10733 mutex_lock(&priv->mutex);
10734 ipw_link_up(priv);
10735 mutex_unlock(&priv->mutex);
10736 }
10737
10738 static void ipw_link_down(struct ipw_priv *priv)
10739 {
10740 ipw_led_link_down(priv);
10741 netif_carrier_off(priv->net_dev);
10742 notify_wx_assoc_event(priv);
10743
10744 /* Cancel any queued work ... */
10745 cancel_delayed_work(&priv->request_scan);
10746 cancel_delayed_work(&priv->request_direct_scan);
10747 cancel_delayed_work(&priv->request_passive_scan);
10748 cancel_delayed_work(&priv->adhoc_check);
10749 cancel_delayed_work(&priv->gather_stats);
10750
10751 ipw_reset_stats(priv);
10752
10753 if (!(priv->status & STATUS_EXIT_PENDING)) {
10754 /* Queue up another scan... */
10755 schedule_delayed_work(&priv->request_scan, 0);
10756 } else
10757 cancel_delayed_work(&priv->scan_event);
10758 }
10759
10760 static void ipw_bg_link_down(struct work_struct *work)
10761 {
10762 struct ipw_priv *priv =
10763 container_of(work, struct ipw_priv, link_down);
10764 mutex_lock(&priv->mutex);
10765 ipw_link_down(priv);
10766 mutex_unlock(&priv->mutex);
10767 }
10768
10769 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10770 {
10771 int ret = 0;
10772
10773 init_waitqueue_head(&priv->wait_command_queue);
10774 init_waitqueue_head(&priv->wait_state);
10775
10776 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10777 INIT_WORK(&priv->associate, ipw_bg_associate);
10778 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10779 INIT_WORK(&priv->system_config, ipw_system_config);
10780 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10781 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10782 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10783 INIT_WORK(&priv->up, ipw_bg_up);
10784 INIT_WORK(&priv->down, ipw_bg_down);
10785 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10786 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10787 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10788 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10789 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10790 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10791 INIT_WORK(&priv->roam, ipw_bg_roam);
10792 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10793 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10794 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10795 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10796 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10797 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10798 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10799
10800 #ifdef CONFIG_IPW2200_QOS
10801 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10802 #endif /* CONFIG_IPW2200_QOS */
10803
10804 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10805 ipw_irq_tasklet, (unsigned long)priv);
10806
10807 return ret;
10808 }
10809
10810 static void shim__set_security(struct net_device *dev,
10811 struct libipw_security *sec)
10812 {
10813 struct ipw_priv *priv = libipw_priv(dev);
10814 int i;
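	/* Descriptive note (added): sync the four WEP key slots -- keys flagged
	 * in sec->flags are copied in (a zero-length key clears its slot);
	 * unflagged slots are cleared unless the level is plain WEP
	 * (SEC_LEVEL_1). */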
10815 for (i = 0; i < 4; i++) {
10816 if (sec->flags & (1 << i)) {
10817 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10818 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10819 if (sec->key_sizes[i] == 0)
10820 priv->ieee->sec.flags &= ~(1 << i);
10821 else {
10822 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10823 sec->key_sizes[i]);
10824 priv->ieee->sec.flags |= (1 << i);
10825 }
10826 priv->status |= STATUS_SECURITY_UPDATED;
10827 } else if (sec->level != SEC_LEVEL_1)
10828 priv->ieee->sec.flags &= ~(1 << i);
10829 }
10830
10831 if (sec->flags & SEC_ACTIVE_KEY) {
10832 if (sec->active_key <= 3) {
10833 priv->ieee->sec.active_key = sec->active_key;
10834 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10835 } else
10836 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10837 priv->status |= STATUS_SECURITY_UPDATED;
10838 } else
10839 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10840
10841 if ((sec->flags & SEC_AUTH_MODE) &&
10842 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10843 priv->ieee->sec.auth_mode = sec->auth_mode;
10844 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10845 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10846 priv->capability |= CAP_SHARED_KEY;
10847 else
10848 priv->capability &= ~CAP_SHARED_KEY;
10849 priv->status |= STATUS_SECURITY_UPDATED;
10850 }
10851
10852 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10853 priv->ieee->sec.flags |= SEC_ENABLED;
10854 priv->ieee->sec.enabled = sec->enabled;
10855 priv->status |= STATUS_SECURITY_UPDATED;
10856 if (sec->enabled)
10857 priv->capability |= CAP_PRIVACY_ON;
10858 else
10859 priv->capability &= ~CAP_PRIVACY_ON;
10860 }
10861
10862 if (sec->flags & SEC_ENCRYPT)
10863 priv->ieee->sec.encrypt = sec->encrypt;
10864
10865 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10866 priv->ieee->sec.level = sec->level;
10867 priv->ieee->sec.flags |= SEC_LEVEL;
10868 priv->status |= STATUS_SECURITY_UPDATED;
10869 }
10870
10871 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10872 ipw_set_hwcrypto_keys(priv);
10873
10874 /* To match current functionality of ipw2100 (which works well w/
10875 	 * various supplicants), we don't force a disassociate if the
10876 * privacy capability changes ... */
10877 #if 0
10878 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10879 (((priv->assoc_request.capability &
10880 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10881 (!(priv->assoc_request.capability &
10882 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10883 IPW_DEBUG_ASSOC("Disassociating due to capability "
10884 "change.\n");
10885 ipw_disassociate(priv);
10886 }
10887 #endif
10888 }
10889
10890 static int init_supported_rates(struct ipw_priv *priv,
10891 struct ipw_supported_rates *rates)
10892 {
10893 /* TODO: Mask out rates based on priv->rates_mask */
10894
10895 memset(rates, 0, sizeof(*rates));
10896 /* configure supported rates */
10897 switch (priv->ieee->freq_band) {
10898 case LIBIPW_52GHZ_BAND:
10899 rates->ieee_mode = IPW_A_MODE;
10900 rates->purpose = IPW_RATE_CAPABILITIES;
10901 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10902 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10903 break;
10904
10905 	default:		/* Mixed or 2.4 GHz */
10906 rates->ieee_mode = IPW_G_MODE;
10907 rates->purpose = IPW_RATE_CAPABILITIES;
10908 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10909 LIBIPW_CCK_DEFAULT_RATES_MASK);
10910 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10911 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10912 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10913 }
10914 break;
10915 }
10916
10917 return 0;
10918 }
10919
10920 static int ipw_config(struct ipw_priv *priv)
10921 {
10922 	/* This is only called from ipw_up, which resets/reloads the firmware,
10923 	   so we don't need to first disable the card before we configure
10924 it */
10925 if (ipw_set_tx_power(priv))
10926 goto error;
10927
10928 /* initialize adapter address */
10929 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10930 goto error;
10931
10932 /* set basic system config settings */
10933 init_sys_config(&priv->sys_config);
10934
10935 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10936 * Does not support BT priority yet (don't abort or defer our Tx) */
10937 if (bt_coexist) {
10938 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10939
10940 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10941 priv->sys_config.bt_coexistence
10942 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10943 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10944 priv->sys_config.bt_coexistence
10945 |= CFG_BT_COEXISTENCE_OOB;
10946 }
10947
10948 #ifdef CONFIG_IPW2200_PROMISCUOUS
10949 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10950 priv->sys_config.accept_all_data_frames = 1;
10951 priv->sys_config.accept_non_directed_frames = 1;
10952 priv->sys_config.accept_all_mgmt_bcpr = 1;
10953 priv->sys_config.accept_all_mgmt_frames = 1;
10954 }
10955 #endif
10956
10957 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10958 priv->sys_config.answer_broadcast_ssid_probe = 1;
10959 else
10960 priv->sys_config.answer_broadcast_ssid_probe = 0;
10961
10962 if (ipw_send_system_config(priv))
10963 goto error;
10964
10965 init_supported_rates(priv, &priv->rates);
10966 if (ipw_send_supported_rates(priv, &priv->rates))
10967 goto error;
10968
10969 /* Set request-to-send threshold */
10970 if (priv->rts_threshold) {
10971 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10972 goto error;
10973 }
10974 #ifdef CONFIG_IPW2200_QOS
10975 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10976 ipw_qos_activate(priv, NULL);
10977 #endif /* CONFIG_IPW2200_QOS */
10978
10979 if (ipw_set_random_seed(priv))
10980 goto error;
10981
10982 /* final state transition to the RUN state */
10983 if (ipw_send_host_complete(priv))
10984 goto error;
10985
10986 priv->status |= STATUS_INIT;
10987
10988 ipw_led_init(priv);
10989 ipw_led_radio_on(priv);
10990 priv->notif_missed_beacons = 0;
10991
10992 /* Set hardware WEP key if it is configured. */
10993 if ((priv->capability & CAP_PRIVACY_ON) &&
10994 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10995 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10996 ipw_set_hwcrypto_keys(priv);
10997
10998 return 0;
10999
11000 error:
11001 return -EIO;
11002 }
11003
11004 /*
11005 * NOTE:
11006 *
11007 * These tables have been tested in conjunction with the
11008 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11009 *
11010  * Altering these values, using them on other hardware, or in geographies
11011  * not intended for resale of the above-mentioned Intel adapters has
11012 * not been tested.
11013 *
11014 * Remember to update the table in README.ipw2200 when changing this
11015 * table.
11016 *
11017 */
11018 static const struct libipw_geo ipw_geos[] = {
11019 { /* Restricted */
11020 "---",
11021 .bg_channels = 11,
11022 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11023 {2427, 4}, {2432, 5}, {2437, 6},
11024 {2442, 7}, {2447, 8}, {2452, 9},
11025 {2457, 10}, {2462, 11}},
11026 },
11027
11028 { /* Custom US/Canada */
11029 "ZZF",
11030 .bg_channels = 11,
11031 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11032 {2427, 4}, {2432, 5}, {2437, 6},
11033 {2442, 7}, {2447, 8}, {2452, 9},
11034 {2457, 10}, {2462, 11}},
11035 .a_channels = 8,
11036 .a = {{5180, 36},
11037 {5200, 40},
11038 {5220, 44},
11039 {5240, 48},
11040 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11041 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11042 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11043 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11044 },
11045
11046 { /* Rest of World */
11047 "ZZD",
11048 .bg_channels = 13,
11049 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11050 {2427, 4}, {2432, 5}, {2437, 6},
11051 {2442, 7}, {2447, 8}, {2452, 9},
11052 {2457, 10}, {2462, 11}, {2467, 12},
11053 {2472, 13}},
11054 },
11055
11056 { /* Custom USA & Europe & High */
11057 "ZZA",
11058 .bg_channels = 11,
11059 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11060 {2427, 4}, {2432, 5}, {2437, 6},
11061 {2442, 7}, {2447, 8}, {2452, 9},
11062 {2457, 10}, {2462, 11}},
11063 .a_channels = 13,
11064 .a = {{5180, 36},
11065 {5200, 40},
11066 {5220, 44},
11067 {5240, 48},
11068 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11069 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11070 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11071 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11072 {5745, 149},
11073 {5765, 153},
11074 {5785, 157},
11075 {5805, 161},
11076 {5825, 165}},
11077 },
11078
11079 { /* Custom NA & Europe */
11080 "ZZB",
11081 .bg_channels = 11,
11082 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11083 {2427, 4}, {2432, 5}, {2437, 6},
11084 {2442, 7}, {2447, 8}, {2452, 9},
11085 {2457, 10}, {2462, 11}},
11086 .a_channels = 13,
11087 .a = {{5180, 36},
11088 {5200, 40},
11089 {5220, 44},
11090 {5240, 48},
11091 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11092 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11093 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11094 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11095 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11096 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11097 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11098 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11099 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11100 },
11101
11102 { /* Custom Japan */
11103 "ZZC",
11104 .bg_channels = 11,
11105 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11106 {2427, 4}, {2432, 5}, {2437, 6},
11107 {2442, 7}, {2447, 8}, {2452, 9},
11108 {2457, 10}, {2462, 11}},
11109 .a_channels = 4,
11110 .a = {{5170, 34}, {5190, 38},
11111 {5210, 42}, {5230, 46}},
11112 },
11113
11114 { /* Custom */
11115 "ZZM",
11116 .bg_channels = 11,
11117 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11118 {2427, 4}, {2432, 5}, {2437, 6},
11119 {2442, 7}, {2447, 8}, {2452, 9},
11120 {2457, 10}, {2462, 11}},
11121 },
11122
11123 { /* Europe */
11124 "ZZE",
11125 .bg_channels = 13,
11126 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11127 {2427, 4}, {2432, 5}, {2437, 6},
11128 {2442, 7}, {2447, 8}, {2452, 9},
11129 {2457, 10}, {2462, 11}, {2467, 12},
11130 {2472, 13}},
11131 .a_channels = 19,
11132 .a = {{5180, 36},
11133 {5200, 40},
11134 {5220, 44},
11135 {5240, 48},
11136 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11137 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11138 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11139 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11140 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11141 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11142 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11143 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11144 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11145 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11146 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11147 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11148 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11149 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11150 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11151 },
11152
11153 { /* Custom Japan */
11154 "ZZJ",
11155 .bg_channels = 14,
11156 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11157 {2427, 4}, {2432, 5}, {2437, 6},
11158 {2442, 7}, {2447, 8}, {2452, 9},
11159 {2457, 10}, {2462, 11}, {2467, 12},
11160 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11161 .a_channels = 4,
11162 .a = {{5170, 34}, {5190, 38},
11163 {5210, 42}, {5230, 46}},
11164 },
11165
11166 { /* Rest of World */
11167 "ZZR",
11168 .bg_channels = 14,
11169 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11170 {2427, 4}, {2432, 5}, {2437, 6},
11171 {2442, 7}, {2447, 8}, {2452, 9},
11172 {2457, 10}, {2462, 11}, {2467, 12},
11173 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11174 LIBIPW_CH_PASSIVE_ONLY}},
11175 },
11176
11177 { /* High Band */
11178 "ZZH",
11179 .bg_channels = 13,
11180 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11181 {2427, 4}, {2432, 5}, {2437, 6},
11182 {2442, 7}, {2447, 8}, {2452, 9},
11183 {2457, 10}, {2462, 11},
11184 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11185 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11186 .a_channels = 4,
11187 .a = {{5745, 149}, {5765, 153},
11188 {5785, 157}, {5805, 161}},
11189 },
11190
11191 { /* Custom Europe */
11192 "ZZG",
11193 .bg_channels = 13,
11194 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11195 {2427, 4}, {2432, 5}, {2437, 6},
11196 {2442, 7}, {2447, 8}, {2452, 9},
11197 {2457, 10}, {2462, 11},
11198 {2467, 12}, {2472, 13}},
11199 .a_channels = 4,
11200 .a = {{5180, 36}, {5200, 40},
11201 {5220, 44}, {5240, 48}},
11202 },
11203
11204 { /* Europe */
11205 "ZZK",
11206 .bg_channels = 13,
11207 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11208 {2427, 4}, {2432, 5}, {2437, 6},
11209 {2442, 7}, {2447, 8}, {2452, 9},
11210 {2457, 10}, {2462, 11},
11211 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11212 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11213 .a_channels = 24,
11214 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11215 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11216 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11217 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11218 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11219 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11220 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11221 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11222 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11223 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11224 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11225 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11226 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11227 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11228 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11229 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11230 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11231 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11232 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11233 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11234 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11235 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11236 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11237 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11238 },
11239
11240 { /* Europe */
11241 "ZZL",
11242 .bg_channels = 11,
11243 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11244 {2427, 4}, {2432, 5}, {2437, 6},
11245 {2442, 7}, {2447, 8}, {2452, 9},
11246 {2457, 10}, {2462, 11}},
11247 .a_channels = 13,
11248 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11249 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11250 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11251 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11252 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11253 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11254 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11255 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11256 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11257 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11258 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11259 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11260 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11261 }
11262 };
11263
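/*
 * ipw_set_geo - select the regulatory geography for this adapter.
 *
 * Matches the 3-byte country/SKU code stored in EEPROM against the
 * ipw_geos[] table above and passes the result to libipw_set_geo().
 * Unrecognized SKUs log a warning and fall back to the first entry.
 */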
11264 static void ipw_set_geo(struct ipw_priv *priv)
11265 {
11266 int j;
11267
11268 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11269 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11270 ipw_geos[j].name, 3))
11271 break;
11272 }
11273
11274 if (j == ARRAY_SIZE(ipw_geos)) {
11275 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11276 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11277 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11278 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11279 j = 0;
11280 }
11281
11282 libipw_set_geo(priv->ieee, &ipw_geos[j]);
11283 }
11284
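/*
 * ipw_up - bring the hardware up: load microcode/firmware/EEPROM, set the
 * geography, and configure the device. Up to MAX_HW_RESTARTS attempts are
 * made; returns 0 on success (or when held down by RF kill) and a negative
 * errno if the firmware cannot be loaded or the device never configures.
 */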
11285 #define MAX_HW_RESTARTS 5
11286 static int ipw_up(struct ipw_priv *priv)
11287 {
11288 int rc, i;
11289
11290 /* Age scan list entries found before suspend */
11291 if (priv->suspend_time) {
11292 libipw_networks_age(priv->ieee, priv->suspend_time);
11293 priv->suspend_time = 0;
11294 }
11295
11296 if (priv->status & STATUS_EXIT_PENDING)
11297 return -EIO;
11298
11299 if (cmdlog && !priv->cmdlog) {
11300 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11301 GFP_KERNEL);
11302 if (priv->cmdlog == NULL) {
11303 IPW_ERROR("Error allocating %d command log entries.\n",
11304 cmdlog);
11305 return -ENOMEM;
11306 } else {
11307 priv->cmdlog_len = cmdlog;
11308 }
11309 }
11310
11311 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11312 /* Load the microcode, firmware, and eeprom.
11313 * Also start the clocks. */
11314 rc = ipw_load(priv);
11315 if (rc) {
11316 IPW_ERROR("Unable to load firmware: %d\n", rc);
11317 return rc;
11318 }
11319
11320 ipw_init_ordinals(priv);
11321 if (!(priv->config & CFG_CUSTOM_MAC))
11322 eeprom_parse_mac(priv, priv->mac_addr);
11323 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11324
11325 ipw_set_geo(priv);
11326
11327 if (priv->status & STATUS_RF_KILL_SW) {
11328 IPW_WARNING("Radio disabled by module parameter.\n");
11329 return 0;
11330 } else if (rf_kill_active(priv)) {
11331 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11332 "Kill switch must be turned off for "
11333 "wireless networking to work.\n");
11334 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11335 return 0;
11336 }
11337
11338 rc = ipw_config(priv);
11339 if (!rc) {
11340 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11341
11342 /* If configured to try and auto-associate, kick
11343 * off a scan. */
11344 schedule_delayed_work(&priv->request_scan, 0);
11345
11346 return 0;
11347 }
11348
11349 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11350 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11351 i, MAX_HW_RESTARTS);
11352
11353 /* We had an error bringing up the hardware, so take it
11354 * all the way back down so we can try again */
11355 ipw_down(priv);
11356 }
11357
11358 /* tried to restart and configure the device for as long as our
11359 * patience could withstand */
11360 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11361
11362 return -EIO;
11363 }
11364
11365 static void ipw_bg_up(struct work_struct *work)
11366 {
11367 struct ipw_priv *priv =
11368 container_of(work, struct ipw_priv, up);
11369 mutex_lock(&priv->mutex);
11370 ipw_up(priv);
11371 mutex_unlock(&priv->mutex);
11372 }
11373
11374 static void ipw_deinit(struct ipw_priv *priv)
11375 {
11376 int i;
11377
11378 if (priv->status & STATUS_SCANNING) {
11379 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11380 ipw_abort_scan(priv);
11381 }
11382
11383 if (priv->status & STATUS_ASSOCIATED) {
11384 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11385 ipw_disassociate(priv);
11386 }
11387
11388 ipw_led_shutdown(priv);
11389
11390 /* Wait up to 1s for status to change to not scanning and not
11391 * associated (disassociation can take a while for a full 802.11
11392 * exchange) */
11393 for (i = 1000; i && (priv->status &
11394 (STATUS_DISASSOCIATING |
11395 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11396 udelay(10);
11397
11398 if (priv->status & (STATUS_DISASSOCIATING |
11399 STATUS_ASSOCIATED | STATUS_SCANNING))
11400 IPW_DEBUG_INFO("Still associated or scanning...\n");
11401 else
11402 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11403
11404 /* Attempt to disable the card */
11405 ipw_send_card_disable(priv, 0);
11406
11407 priv->status &= ~STATUS_INIT;
11408 }
11409
11410 static void ipw_down(struct ipw_priv *priv)
11411 {
11412 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11413
11414 priv->status |= STATUS_EXIT_PENDING;
11415
11416 if (ipw_is_init(priv))
11417 ipw_deinit(priv);
11418
11419 /* Wipe out the EXIT_PENDING status bit if we are not actually
11420 * exiting the module */
11421 if (!exit_pending)
11422 priv->status &= ~STATUS_EXIT_PENDING;
11423
11424 /* tell the device to stop sending interrupts */
11425 ipw_disable_interrupts(priv);
11426
11427 /* Clear all bits but the RF Kill and EXIT_PENDING bits */
11428 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11429 netif_carrier_off(priv->net_dev);
11430
11431 ipw_stop_nic(priv);
11432
11433 ipw_led_radio_off(priv);
11434 }
11435
11436 static void ipw_bg_down(struct work_struct *work)
11437 {
11438 struct ipw_priv *priv =
11439 container_of(work, struct ipw_priv, down);
11440 mutex_lock(&priv->mutex);
11441 ipw_down(priv);
11442 mutex_unlock(&priv->mutex);
11443 }
11444
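/*
 * ipw_wdev_init - translate the libipw geography tables into cfg80211
 * ieee80211_supported_band structures (2.4 GHz and 5 GHz), attach the
 * cipher suites, and register the wiphy with the stack.
 */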
11445 static int ipw_wdev_init(struct net_device *dev)
11446 {
11447 int i, rc = 0;
11448 struct ipw_priv *priv = libipw_priv(dev);
11449 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11450 struct wireless_dev *wdev = &priv->ieee->wdev;
11451
11452 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11453
11454 /* fill-out priv->ieee->bg_band */
11455 if (geo->bg_channels) {
11456 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11457
11458 bg_band->band = IEEE80211_BAND_2GHZ;
11459 bg_band->n_channels = geo->bg_channels;
11460 bg_band->channels = kcalloc(geo->bg_channels,
11461 sizeof(struct ieee80211_channel),
11462 GFP_KERNEL);
11463 if (!bg_band->channels) {
11464 rc = -ENOMEM;
11465 goto out;
11466 }
11467 /* translate geo->bg to bg_band.channels */
11468 for (i = 0; i < geo->bg_channels; i++) {
11469 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11470 bg_band->channels[i].center_freq = geo->bg[i].freq;
11471 bg_band->channels[i].hw_value = geo->bg[i].channel;
11472 bg_band->channels[i].max_power = geo->bg[i].max_power;
11473 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11474 bg_band->channels[i].flags |=
11475 IEEE80211_CHAN_PASSIVE_SCAN;
11476 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11477 bg_band->channels[i].flags |=
11478 IEEE80211_CHAN_NO_IBSS;
11479 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11480 bg_band->channels[i].flags |=
11481 IEEE80211_CHAN_RADAR;
11482 /* No equivalent for LIBIPW_CH_80211H_RULES,
11483 LIBIPW_CH_UNIFORM_SPREADING, or
11484 LIBIPW_CH_B_ONLY... */
11485 }
11486 /* point at bitrate info */
11487 bg_band->bitrates = ipw2200_bg_rates;
11488 bg_band->n_bitrates = ipw2200_num_bg_rates;
11489
11490 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11491 }
11492
11493 /* fill-out priv->ieee->a_band */
11494 if (geo->a_channels) {
11495 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11496
11497 a_band->band = IEEE80211_BAND_5GHZ;
11498 a_band->n_channels = geo->a_channels;
11499 a_band->channels = kcalloc(geo->a_channels,
11500 sizeof(struct ieee80211_channel),
11501 GFP_KERNEL);
11502 if (!a_band->channels) {
11503 rc = -ENOMEM;
11504 goto out;
11505 }
11506 /* translate geo->a to a_band.channels */
11507 for (i = 0; i < geo->a_channels; i++) {
11508 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11509 a_band->channels[i].center_freq = geo->a[i].freq;
11510 a_band->channels[i].hw_value = geo->a[i].channel;
11511 a_band->channels[i].max_power = geo->a[i].max_power;
11512 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11513 a_band->channels[i].flags |=
11514 IEEE80211_CHAN_PASSIVE_SCAN;
11515 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11516 a_band->channels[i].flags |=
11517 IEEE80211_CHAN_NO_IBSS;
11518 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11519 a_band->channels[i].flags |=
11520 IEEE80211_CHAN_RADAR;
11521 /* No equivalent for LIBIPW_CH_80211H_RULES,
11522 LIBIPW_CH_UNIFORM_SPREADING, or
11523 LIBIPW_CH_B_ONLY... */
11524 }
11525 /* point at bitrate info */
11526 a_band->bitrates = ipw2200_a_rates;
11527 a_band->n_bitrates = ipw2200_num_a_rates;
11528
11529 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11530 }
11531
11532 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11533 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11534
11535 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11536
11537 /* With that information in place, we can now register the wiphy... */
11538 if (wiphy_register(wdev->wiphy))
11539 rc = -EIO;
11540 out:
11541 return rc;
11542 }
11543
11544 /* PCI driver stuff */
11545 static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11546 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11547 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11548 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11549 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11550 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11551 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11552 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11553 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11554 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11555 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11556 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11557 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11558 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11559 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11560 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11561 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11562 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11563 {PCI_VDEVICE(INTEL, 0x104f), 0},
11564 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11565 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11566 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11567 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11568
11569 /* required last entry */
11570 {0,}
11571 };
11572
11573 MODULE_DEVICE_TABLE(pci, card_ids);
11574
11575 static struct attribute *ipw_sysfs_entries[] = {
11576 &dev_attr_rf_kill.attr,
11577 &dev_attr_direct_dword.attr,
11578 &dev_attr_indirect_byte.attr,
11579 &dev_attr_indirect_dword.attr,
11580 &dev_attr_mem_gpio_reg.attr,
11581 &dev_attr_command_event_reg.attr,
11582 &dev_attr_nic_type.attr,
11583 &dev_attr_status.attr,
11584 &dev_attr_cfg.attr,
11585 &dev_attr_error.attr,
11586 &dev_attr_event_log.attr,
11587 &dev_attr_cmd_log.attr,
11588 &dev_attr_eeprom_delay.attr,
11589 &dev_attr_ucode_version.attr,
11590 &dev_attr_rtc.attr,
11591 &dev_attr_scan_age.attr,
11592 &dev_attr_led.attr,
11593 &dev_attr_speed_scan.attr,
11594 &dev_attr_net_stats.attr,
11595 &dev_attr_channels.attr,
11596 #ifdef CONFIG_IPW2200_PROMISCUOUS
11597 &dev_attr_rtap_iface.attr,
11598 &dev_attr_rtap_filter.attr,
11599 #endif
11600 NULL
11601 };
11602
11603 static struct attribute_group ipw_attribute_group = {
11604 .name = NULL, /* put in device directory */
11605 .attrs = ipw_sysfs_entries,
11606 };
11607
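/*
 * Promiscuous (rtap) support: when the rtap_iface module parameter is set,
 * ipw_prom_alloc() registers a second "rtap%d" radiotap net_device whose
 * open/stop handlers toggle the firmware's accept_all_* filters via
 * ipw_send_system_config().
 */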
11608 #ifdef CONFIG_IPW2200_PROMISCUOUS
11609 static int ipw_prom_open(struct net_device *dev)
11610 {
11611 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11612 struct ipw_priv *priv = prom_priv->priv;
11613
11614 IPW_DEBUG_INFO("prom dev->open\n");
11615 netif_carrier_off(dev);
11616
11617 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11618 priv->sys_config.accept_all_data_frames = 1;
11619 priv->sys_config.accept_non_directed_frames = 1;
11620 priv->sys_config.accept_all_mgmt_bcpr = 1;
11621 priv->sys_config.accept_all_mgmt_frames = 1;
11622
11623 ipw_send_system_config(priv);
11624 }
11625
11626 return 0;
11627 }
11628
11629 static int ipw_prom_stop(struct net_device *dev)
11630 {
11631 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11632 struct ipw_priv *priv = prom_priv->priv;
11633
11634 IPW_DEBUG_INFO("prom dev->stop\n");
11635
11636 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11637 priv->sys_config.accept_all_data_frames = 0;
11638 priv->sys_config.accept_non_directed_frames = 0;
11639 priv->sys_config.accept_all_mgmt_bcpr = 0;
11640 priv->sys_config.accept_all_mgmt_frames = 0;
11641
11642 ipw_send_system_config(priv);
11643 }
11644
11645 return 0;
11646 }
11647
11648 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11649 struct net_device *dev)
11650 {
11651 IPW_DEBUG_INFO("prom dev->xmit\n");
11652 dev_kfree_skb(skb);
11653 return NETDEV_TX_OK;
11654 }
11655
11656 static const struct net_device_ops ipw_prom_netdev_ops = {
11657 .ndo_open = ipw_prom_open,
11658 .ndo_stop = ipw_prom_stop,
11659 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11660 .ndo_change_mtu = libipw_change_mtu,
11661 .ndo_set_mac_address = eth_mac_addr,
11662 .ndo_validate_addr = eth_validate_addr,
11663 };
11664
11665 static int ipw_prom_alloc(struct ipw_priv *priv)
11666 {
11667 int rc = 0;
11668
11669 if (priv->prom_net_dev)
11670 return -EPERM;
11671
11672 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11673 if (priv->prom_net_dev == NULL)
11674 return -ENOMEM;
11675
11676 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11677 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11678 priv->prom_priv->priv = priv;
11679
11680 strcpy(priv->prom_net_dev->name, "rtap%d");
11681 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11682
11683 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11684 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11685
11686 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11687 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11688
11689 rc = register_netdev(priv->prom_net_dev);
11690 if (rc) {
11691 free_libipw(priv->prom_net_dev, 1);
11692 priv->prom_net_dev = NULL;
11693 return rc;
11694 }
11695
11696 return 0;
11697 }
11698
11699 static void ipw_prom_free(struct ipw_priv *priv)
11700 {
11701 if (!priv->prom_net_dev)
11702 return;
11703
11704 unregister_netdev(priv->prom_net_dev);
11705 free_libipw(priv->prom_net_dev, 1);
11706
11707 priv->prom_net_dev = NULL;
11708 }
11709
11710 #endif
11711
11712 static const struct net_device_ops ipw_netdev_ops = {
11713 .ndo_open = ipw_net_open,
11714 .ndo_stop = ipw_net_stop,
11715 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11716 .ndo_set_mac_address = ipw_net_set_mac_address,
11717 .ndo_start_xmit = libipw_xmit,
11718 .ndo_change_mtu = libipw_change_mtu,
11719 .ndo_validate_addr = eth_validate_addr,
11720 };
11721
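/*
 * ipw_pci_probe - device bring-up order: enable the PCI device, set 32-bit
 * DMA masks, map BAR 0, set up deferred work and the IRQ, create the sysfs
 * attribute group, run ipw_up(), then register the wiphy and net_device
 * (plus the optional rtap interface). Error paths unwind in reverse order.
 */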
11722 static int ipw_pci_probe(struct pci_dev *pdev,
11723 const struct pci_device_id *ent)
11724 {
11725 int err = 0;
11726 struct net_device *net_dev;
11727 void __iomem *base;
11728 u32 length, val;
11729 struct ipw_priv *priv;
11730 int i;
11731
11732 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11733 if (net_dev == NULL) {
11734 err = -ENOMEM;
11735 goto out;
11736 }
11737
11738 priv = libipw_priv(net_dev);
11739 priv->ieee = netdev_priv(net_dev);
11740
11741 priv->net_dev = net_dev;
11742 priv->pci_dev = pdev;
11743 ipw_debug_level = debug;
11744 spin_lock_init(&priv->irq_lock);
11745 spin_lock_init(&priv->lock);
11746 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11747 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11748
11749 mutex_init(&priv->mutex);
11750 if (pci_enable_device(pdev)) {
11751 err = -ENODEV;
11752 goto out_free_libipw;
11753 }
11754
11755 pci_set_master(pdev);
11756
11757 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11758 if (!err)
11759 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11760 if (err) {
11761 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11762 goto out_pci_disable_device;
11763 }
11764
11765 pci_set_drvdata(pdev, priv);
11766
11767 err = pci_request_regions(pdev, DRV_NAME);
11768 if (err)
11769 goto out_pci_disable_device;
11770
11771 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11772 * PCI Tx retries from interfering with C3 CPU state */
11773 pci_read_config_dword(pdev, 0x40, &val);
11774 if ((val & 0x0000ff00) != 0)
11775 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11776
11777 length = pci_resource_len(pdev, 0);
11778 priv->hw_len = length;
11779
11780 base = pci_ioremap_bar(pdev, 0);
11781 if (!base) {
11782 err = -ENODEV;
11783 goto out_pci_release_regions;
11784 }
11785
11786 priv->hw_base = base;
11787 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11788 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11789
11790 err = ipw_setup_deferred_work(priv);
11791 if (err) {
11792 IPW_ERROR("Unable to setup deferred work\n");
11793 goto out_iounmap;
11794 }
11795
11796 ipw_sw_reset(priv, 1);
11797
11798 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11799 if (err) {
11800 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11801 goto out_iounmap;
11802 }
11803
11804 SET_NETDEV_DEV(net_dev, &pdev->dev);
11805
11806 mutex_lock(&priv->mutex);
11807
11808 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11809 priv->ieee->set_security = shim__set_security;
11810 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11811
11812 #ifdef CONFIG_IPW2200_QOS
11813 priv->ieee->is_qos_active = ipw_is_qos_active;
11814 priv->ieee->handle_probe_response = ipw_handle_beacon;
11815 priv->ieee->handle_beacon = ipw_handle_probe_response;
11816 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11817 #endif /* CONFIG_IPW2200_QOS */
11818
11819 priv->ieee->perfect_rssi = -20;
11820 priv->ieee->worst_rssi = -85;
11821
11822 net_dev->netdev_ops = &ipw_netdev_ops;
11823 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11824 net_dev->wireless_data = &priv->wireless_data;
11825 net_dev->wireless_handlers = &ipw_wx_handler_def;
11826 net_dev->ethtool_ops = &ipw_ethtool_ops;
11827
11828 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11829 if (err) {
11830 IPW_ERROR("failed to create sysfs device attributes\n");
11831 mutex_unlock(&priv->mutex);
11832 goto out_release_irq;
11833 }
11834
11835 if (ipw_up(priv)) {
11836 mutex_unlock(&priv->mutex);
11837 err = -EIO;
11838 goto out_remove_sysfs;
11839 }
11840
11841 mutex_unlock(&priv->mutex);
11842
11843 err = ipw_wdev_init(net_dev);
11844 if (err) {
11845 IPW_ERROR("failed to register wireless device\n");
11846 goto out_remove_sysfs;
11847 }
11848
11849 err = register_netdev(net_dev);
11850 if (err) {
11851 IPW_ERROR("failed to register network device\n");
11852 goto out_unregister_wiphy;
11853 }
11854
11855 #ifdef CONFIG_IPW2200_PROMISCUOUS
11856 if (rtap_iface) {
11857 err = ipw_prom_alloc(priv);
11858 if (err) {
11859 IPW_ERROR("Failed to register promiscuous network "
11860 "device (error %d).\n", err);
11861 unregister_netdev(priv->net_dev);
11862 goto out_unregister_wiphy;
11863 }
11864 }
11865 #endif
11866
11867 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11868 "channels, %d 802.11a channels)\n",
11869 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11870 priv->ieee->geo.a_channels);
11871
11872 return 0;
11873
11874 out_unregister_wiphy:
11875 wiphy_unregister(priv->ieee->wdev.wiphy);
11876 kfree(priv->ieee->a_band.channels);
11877 kfree(priv->ieee->bg_band.channels);
11878 out_remove_sysfs:
11879 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11880 out_release_irq:
11881 free_irq(pdev->irq, priv);
11882 out_iounmap:
11883 iounmap(priv->hw_base);
11884 out_pci_release_regions:
11885 pci_release_regions(pdev);
11886 out_pci_disable_device:
11887 pci_disable_device(pdev);
11888 pci_set_drvdata(pdev, NULL);
11889 out_free_libipw:
11890 free_libipw(priv->net_dev, 0);
11891 out:
11892 return err;
11893 }
11894
11895 static void ipw_pci_remove(struct pci_dev *pdev)
11896 {
11897 struct ipw_priv *priv = pci_get_drvdata(pdev);
11898 struct list_head *p, *q;
11899 int i;
11900
11901 if (!priv)
11902 return;
11903
11904 mutex_lock(&priv->mutex);
11905
11906 priv->status |= STATUS_EXIT_PENDING;
11907 ipw_down(priv);
11908 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11909
11910 mutex_unlock(&priv->mutex);
11911
11912 unregister_netdev(priv->net_dev);
11913
11914 if (priv->rxq) {
11915 ipw_rx_queue_free(priv, priv->rxq);
11916 priv->rxq = NULL;
11917 }
11918 ipw_tx_queue_free(priv);
11919
11920 if (priv->cmdlog) {
11921 kfree(priv->cmdlog);
11922 priv->cmdlog = NULL;
11923 }
11924
11925 /* make sure all work items are inactive */
11926 cancel_delayed_work_sync(&priv->adhoc_check);
11927 cancel_work_sync(&priv->associate);
11928 cancel_work_sync(&priv->disassociate);
11929 cancel_work_sync(&priv->system_config);
11930 cancel_work_sync(&priv->rx_replenish);
11931 cancel_work_sync(&priv->adapter_restart);
11932 cancel_delayed_work_sync(&priv->rf_kill);
11933 cancel_work_sync(&priv->up);
11934 cancel_work_sync(&priv->down);
11935 cancel_delayed_work_sync(&priv->request_scan);
11936 cancel_delayed_work_sync(&priv->request_direct_scan);
11937 cancel_delayed_work_sync(&priv->request_passive_scan);
11938 cancel_delayed_work_sync(&priv->scan_event);
11939 cancel_delayed_work_sync(&priv->gather_stats);
11940 cancel_work_sync(&priv->abort_scan);
11941 cancel_work_sync(&priv->roam);
11942 cancel_delayed_work_sync(&priv->scan_check);
11943 cancel_work_sync(&priv->link_up);
11944 cancel_work_sync(&priv->link_down);
11945 cancel_delayed_work_sync(&priv->led_link_on);
11946 cancel_delayed_work_sync(&priv->led_link_off);
11947 cancel_delayed_work_sync(&priv->led_act_off);
11948 cancel_work_sync(&priv->merge_networks);
11949
11950 /* Free MAC hash list for ADHOC */
11951 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11952 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11953 list_del(p);
11954 kfree(list_entry(p, struct ipw_ibss_seq, list));
11955 }
11956 }
11957
11958 kfree(priv->error);
11959 priv->error = NULL;
11960
11961 #ifdef CONFIG_IPW2200_PROMISCUOUS
11962 ipw_prom_free(priv);
11963 #endif
11964
11965 free_irq(pdev->irq, priv);
11966 iounmap(priv->hw_base);
11967 pci_release_regions(pdev);
11968 pci_disable_device(pdev);
11969 pci_set_drvdata(pdev, NULL);
11970 /* wiphy_unregister needs to be here, before free_libipw */
11971 wiphy_unregister(priv->ieee->wdev.wiphy);
11972 kfree(priv->ieee->a_band.channels);
11973 kfree(priv->ieee->bg_band.channels);
11974 free_libipw(priv->net_dev, 0);
11975 free_firmware();
11976 }
11977
11978 #ifdef CONFIG_PM
11979 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11980 {
11981 struct ipw_priv *priv = pci_get_drvdata(pdev);
11982 struct net_device *dev = priv->net_dev;
11983
11984 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11985
11986 /* Take down the device; power it off, etc. */
11987 ipw_down(priv);
11988
11989 /* Remove the PRESENT state of the device */
11990 netif_device_detach(dev);
11991
11992 pci_save_state(pdev);
11993 pci_disable_device(pdev);
11994 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11995
11996 priv->suspend_at = get_seconds();
11997
11998 return 0;
11999 }
12000
12001 static int ipw_pci_resume(struct pci_dev *pdev)
12002 {
12003 struct ipw_priv *priv = pci_get_drvdata(pdev);
12004 struct net_device *dev = priv->net_dev;
12005 int err;
12006 u32 val;
12007
12008 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
12009
12010 pci_set_power_state(pdev, PCI_D0);
12011 err = pci_enable_device(pdev);
12012 if (err) {
12013 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
12014 dev->name);
12015 return err;
12016 }
12017 pci_restore_state(pdev);
12018
12019 /*
12020 * Suspend/Resume resets the PCI configuration space, so we have to
12021 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12022 * from interfering with C3 CPU state. pci_restore_state won't help
12023 * here since it only restores the first 64 bytes pci config header.
12024 */
12025 pci_read_config_dword(pdev, 0x40, &val);
12026 if ((val & 0x0000ff00) != 0)
12027 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12028
12029 /* Set the device back into the PRESENT state; this will also wake
12030 * the queue if needed */
12031 netif_device_attach(dev);
12032
12033 priv->suspend_time = get_seconds() - priv->suspend_at;
12034
12035 /* Bring the device back up */
12036 schedule_work(&priv->up);
12037
12038 return 0;
12039 }
12040 #endif
12041
12042 static void ipw_pci_shutdown(struct pci_dev *pdev)
12043 {
12044 struct ipw_priv *priv = pci_get_drvdata(pdev);
12045
12046 /* Take down the device; power it off, etc. */
12047 ipw_down(priv);
12048
12049 pci_disable_device(pdev);
12050 }
12051
12052 /* driver initialization stuff */
12053 static struct pci_driver ipw_driver = {
12054 .name = DRV_NAME,
12055 .id_table = card_ids,
12056 .probe = ipw_pci_probe,
12057 .remove = ipw_pci_remove,
12058 #ifdef CONFIG_PM
12059 .suspend = ipw_pci_suspend,
12060 .resume = ipw_pci_resume,
12061 #endif
12062 .shutdown = ipw_pci_shutdown,
12063 };
12064
12065 static int __init ipw_init(void)
12066 {
12067 int ret;
12068
12069 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12070 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12071
12072 ret = pci_register_driver(&ipw_driver);
12073 if (ret) {
12074 IPW_ERROR("Unable to initialize PCI module\n");
12075 return ret;
12076 }
12077
12078 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12079 if (ret) {
12080 IPW_ERROR("Unable to create driver sysfs file\n");
12081 pci_unregister_driver(&ipw_driver);
12082 return ret;
12083 }
12084
12085 return ret;
12086 }
12087
12088 static void __exit ipw_exit(void)
12089 {
12090 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12091 pci_unregister_driver(&ipw_driver);
12092 }
12093
12094 module_param(disable, int, 0444);
12095 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12096
12097 module_param(associate, int, 0444);
12098 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12099
12100 module_param(auto_create, int, 0444);
12101 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12102
12103 module_param_named(led, led_support, int, 0444);
12104 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 [on])");
12105
12106 module_param(debug, int, 0444);
12107 MODULE_PARM_DESC(debug, "debug output mask");
12108
12109 module_param_named(channel, default_channel, int, 0444);
12110 MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12111
12112 #ifdef CONFIG_IPW2200_PROMISCUOUS
12113 module_param(rtap_iface, int, 0444);
12114 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12115 #endif
12116
12117 #ifdef CONFIG_IPW2200_QOS
12118 module_param(qos_enable, int, 0444);
12119 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12120
12121 module_param(qos_burst_enable, int, 0444);
12122 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12123
12124 module_param(qos_no_ack_mask, int, 0444);
12125 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12126
12127 module_param(burst_duration_CCK, int, 0444);
12128 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12129
12130 module_param(burst_duration_OFDM, int, 0444);
12131 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12132 #endif /* CONFIG_IPW2200_QOS */
12133
12134 #ifdef CONFIG_IPW2200_MONITOR
12135 module_param_named(mode, network_mode, int, 0444);
12136 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12137 #else
12138 module_param_named(mode, network_mode, int, 0444);
12139 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12140 #endif
12141
12142 module_param(bt_coexist, int, 0444);
12143 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12144
12145 module_param(hwcrypto, int, 0444);
12146 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12147
12148 module_param(cmdlog, int, 0444);
12149 MODULE_PARM_DESC(cmdlog,
12150 "allocate a ring buffer for logging firmware commands");
12151
12152 module_param(roaming, int, 0444);
12153 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12154
12155 module_param(antenna, int, 0444);
12156 MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the antenna with lower background noise), 3=Aux");
12157
12158 module_exit(ipw_exit);
12159 module_init(ipw_init);