]>
Commit | Line | Data |
---|---|---|
43f66a6c JK |
1 | /****************************************************************************** |
2 | ||
3 | Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. | |
4 | ||
5 | 802.11 status code portion of this file from ethereal-0.10.6: | |
6 | Copyright 2000, Axis Communications AB | |
7 | Ethereal - Network traffic analyzer | |
8 | By Gerald Combs <gerald@ethereal.com> | |
9 | Copyright 1998 Gerald Combs | |
10 | ||
11 | This program is free software; you can redistribute it and/or modify it | |
12 | under the terms of version 2 of the GNU General Public License as | |
13 | published by the Free Software Foundation. | |
14 | ||
15 | This program is distributed in the hope that it will be useful, but WITHOUT | |
16 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
17 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
18 | more details. | |
19 | ||
20 | You should have received a copy of the GNU General Public License along with | |
21 | this program; if not, write to the Free Software Foundation, Inc., 59 | |
22 | Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
23 | ||
24 | The full GNU General Public License is included in this distribution in the | |
25 | file called LICENSE. | |
26 | ||
27 | Contact Information: | |
28 | James P. Ketrenos <ipw2100-admin@linux.intel.com> | |
29 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
30 | ||
31 | ******************************************************************************/ | |
32 | ||
33 | #include "ipw2200.h" | |
34 | ||
35 | #define IPW2200_VERSION "1.0.0" | |
36 | #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" | |
37 | #define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation" | |
38 | #define DRV_VERSION IPW2200_VERSION | |
39 | ||
40 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | |
41 | MODULE_VERSION(DRV_VERSION); | |
42 | MODULE_AUTHOR(DRV_COPYRIGHT); | |
43 | MODULE_LICENSE("GPL"); | |
44 | ||
45 | static int debug = 0; | |
46 | static int channel = 0; | |
47 | static char *ifname; | |
48 | static int mode = 0; | |
49 | ||
50 | static u32 ipw_debug_level; | |
51 | static int associate = 1; | |
52 | static int auto_create = 1; | |
53 | static int disable = 0; | |
54 | static const char ipw_modes[] = { | |
55 | 'a', 'b', 'g', '?' | |
56 | }; | |
57 | ||
58 | static void ipw_rx(struct ipw_priv *priv); | |
59 | static int ipw_queue_tx_reclaim(struct ipw_priv *priv, | |
60 | struct clx2_tx_queue *txq, int qindex); | |
61 | static int ipw_queue_reset(struct ipw_priv *priv); | |
62 | ||
63 | static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, | |
64 | int len, int sync); | |
65 | ||
66 | static void ipw_tx_queue_free(struct ipw_priv *); | |
67 | ||
68 | static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); | |
69 | static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); | |
70 | static void ipw_rx_queue_replenish(void *); | |
71 | ||
72 | static int ipw_up(struct ipw_priv *); | |
73 | static void ipw_down(struct ipw_priv *); | |
74 | static int ipw_config(struct ipw_priv *); | |
75 | static int init_supported_rates(struct ipw_priv *priv, struct ipw_supported_rates *prates); | |
76 | ||
77 | static u8 band_b_active_channel[MAX_B_CHANNELS] = { | |
78 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0 | |
79 | }; | |
80 | static u8 band_a_active_channel[MAX_A_CHANNELS] = { | |
81 | 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0 | |
82 | }; | |
83 | ||
84 | static int is_valid_channel(int mode_mask, int channel) | |
85 | { | |
86 | int i; | |
87 | ||
88 | if (!channel) | |
89 | return 0; | |
90 | ||
91 | if (mode_mask & IEEE_A) | |
92 | for (i = 0; i < MAX_A_CHANNELS; i++) | |
93 | if (band_a_active_channel[i] == channel) | |
94 | return IEEE_A; | |
95 | ||
96 | if (mode_mask & (IEEE_B | IEEE_G)) | |
97 | for (i = 0; i < MAX_B_CHANNELS; i++) | |
98 | if (band_b_active_channel[i] == channel) | |
99 | return mode_mask & (IEEE_B | IEEE_G); | |
100 | ||
101 | return 0; | |
102 | } | |
103 | ||
104 | static char *snprint_line(char *buf, size_t count, | |
105 | const u8 *data, u32 len, u32 ofs) | |
106 | { | |
107 | int out, i, j, l; | |
108 | char c; | |
109 | ||
110 | out = snprintf(buf, count, "%08X", ofs); | |
111 | ||
112 | for (l = 0, i = 0; i < 2; i++) { | |
113 | out += snprintf(buf + out, count - out, " "); | |
114 | for (j = 0; j < 8 && l < len; j++, l++) | |
115 | out += snprintf(buf + out, count - out, "%02X ", | |
116 | data[(i * 8 + j)]); | |
117 | for (; j < 8; j++) | |
118 | out += snprintf(buf + out, count - out, " "); | |
119 | } | |
120 | ||
121 | out += snprintf(buf + out, count - out, " "); | |
122 | for (l = 0, i = 0; i < 2; i++) { | |
123 | out += snprintf(buf + out, count - out, " "); | |
124 | for (j = 0; j < 8 && l < len; j++, l++) { | |
125 | c = data[(i * 8 + j)]; | |
126 | if (!isascii(c) || !isprint(c)) | |
127 | c = '.'; | |
128 | ||
129 | out += snprintf(buf + out, count - out, "%c", c); | |
130 | } | |
131 | ||
132 | for (; j < 8; j++) | |
133 | out += snprintf(buf + out, count - out, " "); | |
134 | } | |
135 | ||
136 | return buf; | |
137 | } | |
138 | ||
139 | static void printk_buf(int level, const u8 *data, u32 len) | |
140 | { | |
141 | char line[81]; | |
142 | u32 ofs = 0; | |
143 | if (!(ipw_debug_level & level)) | |
144 | return; | |
145 | ||
146 | while (len) { | |
147 | printk(KERN_DEBUG "%s\n", | |
148 | snprint_line(line, sizeof(line), &data[ofs], | |
149 | min(len, 16U), ofs)); | |
150 | ofs += 16; | |
151 | len -= min(len, 16U); | |
152 | } | |
153 | } | |
154 | ||
155 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); | |
156 | #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) | |
157 | ||
158 | static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); | |
159 | #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) | |
160 | ||
161 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); | |
162 | static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) | |
163 | { | |
164 | IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c)); | |
165 | _ipw_write_reg8(a, b, c); | |
166 | } | |
167 | ||
168 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); | |
169 | static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) | |
170 | { | |
171 | IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c)); | |
172 | _ipw_write_reg16(a, b, c); | |
173 | } | |
174 | ||
175 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); | |
176 | static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) | |
177 | { | |
178 | IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c)); | |
179 | _ipw_write_reg32(a, b, c); | |
180 | } | |
181 | ||
182 | #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) | |
183 | #define ipw_write8(ipw, ofs, val) \ | |
184 | IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | |
185 | _ipw_write8(ipw, ofs, val) | |
186 | ||
187 | #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) | |
188 | #define ipw_write16(ipw, ofs, val) \ | |
189 | IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | |
190 | _ipw_write16(ipw, ofs, val) | |
191 | ||
192 | #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs)) | |
193 | #define ipw_write32(ipw, ofs, val) \ | |
194 | IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ | |
195 | _ipw_write32(ipw, ofs, val) | |
196 | ||
197 | #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) | |
198 | static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) { | |
199 | IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32)(ofs)); | |
200 | return _ipw_read8(ipw, ofs); | |
201 | } | |
202 | #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) | |
203 | ||
204 | #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) | |
205 | static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) { | |
206 | IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32)(ofs)); | |
207 | return _ipw_read16(ipw, ofs); | |
208 | } | |
209 | #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) | |
210 | ||
211 | #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) | |
212 | static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) { | |
213 | IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32)(ofs)); | |
214 | return _ipw_read32(ipw, ofs); | |
215 | } | |
216 | #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) | |
217 | ||
218 | static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); | |
219 | #define ipw_read_indirect(a, b, c, d) \ | |
220 | IPW_DEBUG_IO("%s %d: read_inddirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ | |
221 | _ipw_read_indirect(a, b, c, d) | |
222 | ||
223 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *data, int num); | |
224 | #define ipw_write_indirect(a, b, c, d) \ | |
225 | IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ | |
226 | _ipw_write_indirect(a, b, c, d) | |
227 | ||
228 | /* indirect write s */ | |
229 | static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, | |
230 | u32 value) | |
231 | { | |
232 | IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", | |
233 | priv, reg, value); | |
234 | _ipw_write32(priv, CX2_INDIRECT_ADDR, reg); | |
235 | _ipw_write32(priv, CX2_INDIRECT_DATA, value); | |
236 | } | |
237 | ||
238 | ||
239 | static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) | |
240 | { | |
241 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); | |
242 | _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK); | |
243 | _ipw_write8(priv, CX2_INDIRECT_DATA, value); | |
244 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", | |
245 | (unsigned)(priv->hw_base + CX2_INDIRECT_DATA), | |
246 | value); | |
247 | } | |
248 | ||
249 | static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, | |
250 | u16 value) | |
251 | { | |
252 | IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); | |
253 | _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK); | |
254 | _ipw_write16(priv, CX2_INDIRECT_DATA, value); | |
255 | } | |
256 | ||
257 | /* indirect read s */ | |
258 | ||
259 | static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) | |
260 | { | |
261 | u32 word; | |
262 | _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK); | |
263 | IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); | |
264 | word = _ipw_read32(priv, CX2_INDIRECT_DATA); | |
265 | return (word >> ((reg & 0x3)*8)) & 0xff; | |
266 | } | |
267 | ||
268 | static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) | |
269 | { | |
270 | u32 value; | |
271 | ||
272 | IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg); | |
273 | ||
274 | _ipw_write32(priv, CX2_INDIRECT_ADDR, reg); | |
275 | value = _ipw_read32(priv, CX2_INDIRECT_DATA); | |
276 | IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value); | |
277 | return value; | |
278 | } | |
279 | ||
280 | /* iterative/auto-increment 32 bit reads and writes */ | |
281 | static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, | |
282 | int num) | |
283 | { | |
284 | u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK; | |
285 | u32 dif_len = addr - aligned_addr; | |
286 | u32 aligned_len; | |
287 | u32 i; | |
288 | ||
289 | IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); | |
290 | ||
291 | /* Read the first nibble byte by byte */ | |
292 | if (unlikely(dif_len)) { | |
293 | /* Start reading at aligned_addr + dif_len */ | |
294 | _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr); | |
295 | for (i = dif_len; i < 4; i++, buf++) | |
296 | *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i); | |
297 | num -= dif_len; | |
298 | aligned_addr += 4; | |
299 | } | |
300 | ||
301 | /* Read DWs through autoinc register */ | |
302 | _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr); | |
303 | aligned_len = num & CX2_INDIRECT_ADDR_MASK; | |
304 | for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) | |
305 | *(u32*)buf = ipw_read32(priv, CX2_AUTOINC_DATA); | |
306 | ||
307 | /* Copy the last nibble */ | |
308 | dif_len = num - aligned_len; | |
309 | _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr); | |
310 | for (i = 0; i < dif_len; i++, buf++) | |
311 | *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i); | |
312 | } | |
313 | ||
314 | static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *buf, | |
315 | int num) | |
316 | { | |
317 | u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK; | |
318 | u32 dif_len = addr - aligned_addr; | |
319 | u32 aligned_len; | |
320 | u32 i; | |
321 | ||
322 | IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); | |
323 | ||
324 | /* Write the first nibble byte by byte */ | |
325 | if (unlikely(dif_len)) { | |
326 | /* Start writing at aligned_addr + dif_len */ | |
327 | _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr); | |
328 | for (i = dif_len; i < 4; i++, buf++) | |
329 | _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf); | |
330 | num -= dif_len; | |
331 | aligned_addr += 4; | |
332 | } | |
333 | ||
334 | /* Write DWs through autoinc register */ | |
335 | _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr); | |
336 | aligned_len = num & CX2_INDIRECT_ADDR_MASK; | |
337 | for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) | |
338 | _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32*)buf); | |
339 | ||
340 | /* Copy the last nibble */ | |
341 | dif_len = num - aligned_len; | |
342 | _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr); | |
343 | for (i = 0; i < dif_len; i++, buf++) | |
344 | _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf); | |
345 | } | |
346 | ||
347 | static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, | |
348 | int num) | |
349 | { | |
350 | memcpy_toio((priv->hw_base + addr), buf, num); | |
351 | } | |
352 | ||
353 | static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) | |
354 | { | |
355 | ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); | |
356 | } | |
357 | ||
358 | static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) | |
359 | { | |
360 | ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); | |
361 | } | |
362 | ||
363 | static inline void ipw_enable_interrupts(struct ipw_priv *priv) | |
364 | { | |
365 | if (priv->status & STATUS_INT_ENABLED) | |
366 | return; | |
367 | priv->status |= STATUS_INT_ENABLED; | |
368 | ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL); | |
369 | } | |
370 | ||
371 | static inline void ipw_disable_interrupts(struct ipw_priv *priv) | |
372 | { | |
373 | if (!(priv->status & STATUS_INT_ENABLED)) | |
374 | return; | |
375 | priv->status &= ~STATUS_INT_ENABLED; | |
376 | ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL); | |
377 | } | |
378 | ||
379 | static char *ipw_error_desc(u32 val) | |
380 | { | |
381 | switch (val) { | |
382 | case IPW_FW_ERROR_OK: | |
383 | return "ERROR_OK"; | |
384 | case IPW_FW_ERROR_FAIL: | |
385 | return "ERROR_FAIL"; | |
386 | case IPW_FW_ERROR_MEMORY_UNDERFLOW: | |
387 | return "MEMORY_UNDERFLOW"; | |
388 | case IPW_FW_ERROR_MEMORY_OVERFLOW: | |
389 | return "MEMORY_OVERFLOW"; | |
390 | case IPW_FW_ERROR_BAD_PARAM: | |
391 | return "ERROR_BAD_PARAM"; | |
392 | case IPW_FW_ERROR_BAD_CHECKSUM: | |
393 | return "ERROR_BAD_CHECKSUM"; | |
394 | case IPW_FW_ERROR_NMI_INTERRUPT: | |
395 | return "ERROR_NMI_INTERRUPT"; | |
396 | case IPW_FW_ERROR_BAD_DATABASE: | |
397 | return "ERROR_BAD_DATABASE"; | |
398 | case IPW_FW_ERROR_ALLOC_FAIL: | |
399 | return "ERROR_ALLOC_FAIL"; | |
400 | case IPW_FW_ERROR_DMA_UNDERRUN: | |
401 | return "ERROR_DMA_UNDERRUN"; | |
402 | case IPW_FW_ERROR_DMA_STATUS: | |
403 | return "ERROR_DMA_STATUS"; | |
404 | case IPW_FW_ERROR_DINOSTATUS_ERROR: | |
405 | return "ERROR_DINOSTATUS_ERROR"; | |
406 | case IPW_FW_ERROR_EEPROMSTATUS_ERROR: | |
407 | return "ERROR_EEPROMSTATUS_ERROR"; | |
408 | case IPW_FW_ERROR_SYSASSERT: | |
409 | return "ERROR_SYSASSERT"; | |
410 | case IPW_FW_ERROR_FATAL_ERROR: | |
411 | return "ERROR_FATALSTATUS_ERROR"; | |
412 | default: | |
413 | return "UNKNOWNSTATUS_ERROR"; | |
414 | } | |
415 | } | |
416 | ||
417 | static void ipw_dump_nic_error_log(struct ipw_priv *priv) | |
418 | { | |
419 | u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base; | |
420 | ||
421 | base = ipw_read32(priv, IPWSTATUS_ERROR_LOG); | |
422 | count = ipw_read_reg32(priv, base); | |
423 | ||
424 | if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { | |
425 | IPW_ERROR("Start IPW Error Log Dump:\n"); | |
426 | IPW_ERROR("Status: 0x%08X, Config: %08X\n", | |
427 | priv->status, priv->config); | |
428 | } | |
429 | ||
430 | for (i = ERROR_START_OFFSET; | |
431 | i <= count * ERROR_ELEM_SIZE; | |
432 | i += ERROR_ELEM_SIZE) { | |
433 | desc = ipw_read_reg32(priv, base + i); | |
434 | time = ipw_read_reg32(priv, base + i + 1*sizeof(u32)); | |
435 | blink1 = ipw_read_reg32(priv, base + i + 2*sizeof(u32)); | |
436 | blink2 = ipw_read_reg32(priv, base + i + 3*sizeof(u32)); | |
437 | ilink1 = ipw_read_reg32(priv, base + i + 4*sizeof(u32)); | |
438 | ilink2 = ipw_read_reg32(priv, base + i + 5*sizeof(u32)); | |
439 | idata = ipw_read_reg32(priv, base + i + 6*sizeof(u32)); | |
440 | ||
441 | IPW_ERROR( | |
442 | "%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", | |
443 | ipw_error_desc(desc), time, blink1, blink2, | |
444 | ilink1, ilink2, idata); | |
445 | } | |
446 | } | |
447 | ||
448 | static void ipw_dump_nic_event_log(struct ipw_priv *priv) | |
449 | { | |
450 | u32 ev, time, data, i, count, base; | |
451 | ||
452 | base = ipw_read32(priv, IPW_EVENT_LOG); | |
453 | count = ipw_read_reg32(priv, base); | |
454 | ||
455 | if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE) | |
456 | IPW_ERROR("Start IPW Event Log Dump:\n"); | |
457 | ||
458 | for (i = EVENT_START_OFFSET; | |
459 | i <= count * EVENT_ELEM_SIZE; | |
460 | i += EVENT_ELEM_SIZE) { | |
461 | ev = ipw_read_reg32(priv, base + i); | |
462 | time = ipw_read_reg32(priv, base + i + 1*sizeof(u32)); | |
463 | data = ipw_read_reg32(priv, base + i + 2*sizeof(u32)); | |
464 | ||
465 | #ifdef CONFIG_IPW_DEBUG | |
466 | IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev); | |
467 | #endif | |
468 | } | |
469 | } | |
470 | ||
471 | static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, | |
472 | u32 *len) | |
473 | { | |
474 | u32 addr, field_info, field_len, field_count, total_len; | |
475 | ||
476 | IPW_DEBUG_ORD("ordinal = %i\n", ord); | |
477 | ||
478 | if (!priv || !val || !len) { | |
479 | IPW_DEBUG_ORD("Invalid argument\n"); | |
480 | return -EINVAL; | |
481 | } | |
482 | ||
483 | /* verify device ordinal tables have been initialized */ | |
484 | if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) { | |
485 | IPW_DEBUG_ORD("Access ordinals before initialization\n"); | |
486 | return -EINVAL; | |
487 | } | |
488 | ||
489 | switch (IPW_ORD_TABLE_ID_MASK & ord) { | |
490 | case IPW_ORD_TABLE_0_MASK: | |
491 | /* | |
492 | * TABLE 0: Direct access to a table of 32 bit values | |
493 | * | |
494 | * This is a very simple table with the data directly | |
495 | * read from the table | |
496 | */ | |
497 | ||
498 | /* remove the table id from the ordinal */ | |
499 | ord &= IPW_ORD_TABLE_VALUE_MASK; | |
500 | ||
501 | /* boundary check */ | |
502 | if (ord > priv->table0_len) { | |
503 | IPW_DEBUG_ORD("ordinal value (%i) longer then " | |
504 | "max (%i)\n", ord, priv->table0_len); | |
505 | return -EINVAL; | |
506 | } | |
507 | ||
508 | /* verify we have enough room to store the value */ | |
509 | if (*len < sizeof(u32)) { | |
510 | IPW_DEBUG_ORD("ordinal buffer length too small, " | |
511 | "need %d\n", sizeof(u32)); | |
512 | return -EINVAL; | |
513 | } | |
514 | ||
515 | IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n", | |
516 | ord, priv->table0_addr + (ord << 2)); | |
517 | ||
518 | *len = sizeof(u32); | |
519 | ord <<= 2; | |
520 | *((u32 *)val) = ipw_read32(priv, priv->table0_addr + ord); | |
521 | break; | |
522 | ||
523 | case IPW_ORD_TABLE_1_MASK: | |
524 | /* | |
525 | * TABLE 1: Indirect access to a table of 32 bit values | |
526 | * | |
527 | * This is a fairly large table of u32 values each | |
528 | * representing starting addr for the data (which is | |
529 | * also a u32) | |
530 | */ | |
531 | ||
532 | /* remove the table id from the ordinal */ | |
533 | ord &= IPW_ORD_TABLE_VALUE_MASK; | |
534 | ||
535 | /* boundary check */ | |
536 | if (ord > priv->table1_len) { | |
537 | IPW_DEBUG_ORD("ordinal value too long\n"); | |
538 | return -EINVAL; | |
539 | } | |
540 | ||
541 | /* verify we have enough room to store the value */ | |
542 | if (*len < sizeof(u32)) { | |
543 | IPW_DEBUG_ORD("ordinal buffer length too small, " | |
544 | "need %d\n", sizeof(u32)); | |
545 | return -EINVAL; | |
546 | } | |
547 | ||
548 | *((u32 *)val) = ipw_read_reg32(priv, (priv->table1_addr + (ord << 2))); | |
549 | *len = sizeof(u32); | |
550 | break; | |
551 | ||
552 | case IPW_ORD_TABLE_2_MASK: | |
553 | /* | |
554 | * TABLE 2: Indirect access to a table of variable sized values | |
555 | * | |
556 | * This table consist of six values, each containing | |
557 | * - dword containing the starting offset of the data | |
558 | * - dword containing the lengh in the first 16bits | |
559 | * and the count in the second 16bits | |
560 | */ | |
561 | ||
562 | /* remove the table id from the ordinal */ | |
563 | ord &= IPW_ORD_TABLE_VALUE_MASK; | |
564 | ||
565 | /* boundary check */ | |
566 | if (ord > priv->table2_len) { | |
567 | IPW_DEBUG_ORD("ordinal value too long\n"); | |
568 | return -EINVAL; | |
569 | } | |
570 | ||
571 | /* get the address of statistic */ | |
572 | addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3)); | |
573 | ||
574 | /* get the second DW of statistics ; | |
575 | * two 16-bit words - first is length, second is count */ | |
576 | field_info = ipw_read_reg32(priv, priv->table2_addr + (ord << 3) + sizeof(u32)); | |
577 | ||
578 | /* get each entry length */ | |
579 | field_len = *((u16 *)&field_info); | |
580 | ||
581 | /* get number of entries */ | |
582 | field_count = *(((u16 *)&field_info) + 1); | |
583 | ||
584 | /* abort if not enought memory */ | |
585 | total_len = field_len * field_count; | |
586 | if (total_len > *len) { | |
587 | *len = total_len; | |
588 | return -EINVAL; | |
589 | } | |
590 | ||
591 | *len = total_len; | |
592 | if (!total_len) | |
593 | return 0; | |
594 | ||
595 | IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, " | |
596 | "field_info = 0x%08x\n", | |
597 | addr, total_len, field_info); | |
598 | ipw_read_indirect(priv, addr, val, total_len); | |
599 | break; | |
600 | ||
601 | default: | |
602 | IPW_DEBUG_ORD("Invalid ordinal!\n"); | |
603 | return -EINVAL; | |
604 | ||
605 | } | |
606 | ||
607 | ||
608 | return 0; | |
609 | } | |
610 | ||
611 | static void ipw_init_ordinals(struct ipw_priv *priv) | |
612 | { | |
613 | priv->table0_addr = IPW_ORDINALS_TABLE_LOWER; | |
614 | priv->table0_len = ipw_read32(priv, priv->table0_addr); | |
615 | ||
616 | IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n", | |
617 | priv->table0_addr, priv->table0_len); | |
618 | ||
619 | priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1); | |
620 | priv->table1_len = ipw_read_reg32(priv, priv->table1_addr); | |
621 | ||
622 | IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n", | |
623 | priv->table1_addr, priv->table1_len); | |
624 | ||
625 | priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2); | |
626 | priv->table2_len = ipw_read_reg32(priv, priv->table2_addr); | |
627 | priv->table2_len &= 0x0000ffff; /* use first two bytes */ | |
628 | ||
629 | IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n", | |
630 | priv->table2_addr, priv->table2_len); | |
631 | ||
632 | } | |
633 | ||
634 | /* | |
635 | * The following adds a new attribute to the sysfs representation | |
636 | * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/) | |
637 | * used for controling the debug level. | |
638 | * | |
639 | * See the level definitions in ipw for details. | |
640 | */ | |
641 | static ssize_t show_debug_level(struct device_driver *d, char *buf) | |
642 | { | |
643 | return sprintf(buf, "0x%08X\n", ipw_debug_level); | |
644 | } | |
645 | static ssize_t store_debug_level(struct device_driver *d, const char *buf, | |
646 | size_t count) | |
647 | { | |
648 | char *p = (char *)buf; | |
649 | u32 val; | |
650 | ||
651 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | |
652 | p++; | |
653 | if (p[0] == 'x' || p[0] == 'X') | |
654 | p++; | |
655 | val = simple_strtoul(p, &p, 16); | |
656 | } else | |
657 | val = simple_strtoul(p, &p, 10); | |
658 | if (p == buf) | |
659 | printk(KERN_INFO DRV_NAME | |
660 | ": %s is not in hex or decimal form.\n", buf); | |
661 | else | |
662 | ipw_debug_level = val; | |
663 | ||
664 | return strnlen(buf, count); | |
665 | } | |
666 | ||
667 | static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, | |
668 | show_debug_level, store_debug_level); | |
669 | ||
670 | static ssize_t show_status(struct device *d, char *buf) | |
671 | { | |
672 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
673 | return sprintf(buf, "0x%08x\n", (int)p->status); | |
674 | } | |
675 | static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); | |
676 | ||
677 | static ssize_t show_cfg(struct device *d, char *buf) | |
678 | { | |
679 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
680 | return sprintf(buf, "0x%08x\n", (int)p->config); | |
681 | } | |
682 | static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); | |
683 | ||
684 | static ssize_t show_nic_type(struct device *d, char *buf) | |
685 | { | |
686 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
687 | u8 type = p->eeprom[EEPROM_NIC_TYPE]; | |
688 | ||
689 | switch (type) { | |
690 | case EEPROM_NIC_TYPE_STANDARD: | |
691 | return sprintf(buf, "STANDARD\n"); | |
692 | case EEPROM_NIC_TYPE_DELL: | |
693 | return sprintf(buf, "DELL\n"); | |
694 | case EEPROM_NIC_TYPE_FUJITSU: | |
695 | return sprintf(buf, "FUJITSU\n"); | |
696 | case EEPROM_NIC_TYPE_IBM: | |
697 | return sprintf(buf, "IBM\n"); | |
698 | case EEPROM_NIC_TYPE_HP: | |
699 | return sprintf(buf, "HP\n"); | |
700 | } | |
701 | ||
702 | return sprintf(buf, "UNKNOWN\n"); | |
703 | } | |
704 | static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); | |
705 | ||
706 | static ssize_t dump_error_log(struct device *d, const char *buf, | |
707 | size_t count) | |
708 | { | |
709 | char *p = (char *)buf; | |
710 | ||
711 | if (p[0] == '1') | |
712 | ipw_dump_nic_error_log((struct ipw_priv*)d->driver_data); | |
713 | ||
714 | return strnlen(buf, count); | |
715 | } | |
716 | static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); | |
717 | ||
718 | static ssize_t dump_event_log(struct device *d, const char *buf, | |
719 | size_t count) | |
720 | { | |
721 | char *p = (char *)buf; | |
722 | ||
723 | if (p[0] == '1') | |
724 | ipw_dump_nic_event_log((struct ipw_priv*)d->driver_data); | |
725 | ||
726 | return strnlen(buf, count); | |
727 | } | |
728 | static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); | |
729 | ||
730 | static ssize_t show_ucode_version(struct device *d, char *buf) | |
731 | { | |
732 | u32 len = sizeof(u32), tmp = 0; | |
733 | struct ipw_priv *p = (struct ipw_priv*)d->driver_data; | |
734 | ||
735 | if(ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) | |
736 | return 0; | |
737 | ||
738 | return sprintf(buf, "0x%08x\n", tmp); | |
739 | } | |
740 | static DEVICE_ATTR(ucode_version, S_IWUSR|S_IRUGO, show_ucode_version, NULL); | |
741 | ||
742 | static ssize_t show_rtc(struct device *d, char *buf) | |
743 | { | |
744 | u32 len = sizeof(u32), tmp = 0; | |
745 | struct ipw_priv *p = (struct ipw_priv*)d->driver_data; | |
746 | ||
747 | if(ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) | |
748 | return 0; | |
749 | ||
750 | return sprintf(buf, "0x%08x\n", tmp); | |
751 | } | |
752 | static DEVICE_ATTR(rtc, S_IWUSR|S_IRUGO, show_rtc, NULL); | |
753 | ||
754 | /* | |
755 | * Add a device attribute to view/control the delay between eeprom | |
756 | * operations. | |
757 | */ | |
758 | static ssize_t show_eeprom_delay(struct device *d, char *buf) | |
759 | { | |
760 | int n = ((struct ipw_priv*)d->driver_data)->eeprom_delay; | |
761 | return sprintf(buf, "%i\n", n); | |
762 | } | |
763 | static ssize_t store_eeprom_delay(struct device *d, const char *buf, | |
764 | size_t count) | |
765 | { | |
766 | struct ipw_priv *p = (struct ipw_priv*)d->driver_data; | |
767 | sscanf(buf, "%i", &p->eeprom_delay); | |
768 | return strnlen(buf, count); | |
769 | } | |
770 | static DEVICE_ATTR(eeprom_delay, S_IWUSR|S_IRUGO, | |
771 | show_eeprom_delay,store_eeprom_delay); | |
772 | ||
773 | static ssize_t show_command_event_reg(struct device *d, char *buf) | |
774 | { | |
775 | u32 reg = 0; | |
776 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
777 | ||
778 | reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT); | |
779 | return sprintf(buf, "0x%08x\n", reg); | |
780 | } | |
781 | static ssize_t store_command_event_reg(struct device *d, | |
782 | const char *buf, | |
783 | size_t count) | |
784 | { | |
785 | u32 reg; | |
786 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
787 | ||
788 | sscanf(buf, "%x", ®); | |
789 | ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg); | |
790 | return strnlen(buf, count); | |
791 | } | |
792 | static DEVICE_ATTR(command_event_reg, S_IWUSR|S_IRUGO, | |
793 | show_command_event_reg,store_command_event_reg); | |
794 | ||
795 | static ssize_t show_mem_gpio_reg(struct device *d, char *buf) | |
796 | { | |
797 | u32 reg = 0; | |
798 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
799 | ||
800 | reg = ipw_read_reg32(p, 0x301100); | |
801 | return sprintf(buf, "0x%08x\n", reg); | |
802 | } | |
803 | static ssize_t store_mem_gpio_reg(struct device *d, | |
804 | const char *buf, | |
805 | size_t count) | |
806 | { | |
807 | u32 reg; | |
808 | struct ipw_priv *p = (struct ipw_priv *)d->driver_data; | |
809 | ||
810 | sscanf(buf, "%x", ®); | |
811 | ipw_write_reg32(p, 0x301100, reg); | |
812 | return strnlen(buf, count); | |
813 | } | |
814 | static DEVICE_ATTR(mem_gpio_reg, S_IWUSR|S_IRUGO, | |
815 | show_mem_gpio_reg,store_mem_gpio_reg); | |
816 | ||
817 | static ssize_t show_indirect_dword(struct device *d, char *buf) | |
818 | { | |
819 | u32 reg = 0; | |
820 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
821 | if (priv->status & STATUS_INDIRECT_DWORD) | |
822 | reg = ipw_read_reg32(priv, priv->indirect_dword); | |
823 | else | |
824 | reg = 0; | |
825 | ||
826 | return sprintf(buf, "0x%08x\n", reg); | |
827 | } | |
828 | static ssize_t store_indirect_dword(struct device *d, | |
829 | const char *buf, | |
830 | size_t count) | |
831 | { | |
832 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
833 | ||
834 | sscanf(buf, "%x", &priv->indirect_dword); | |
835 | priv->status |= STATUS_INDIRECT_DWORD; | |
836 | return strnlen(buf, count); | |
837 | } | |
838 | static DEVICE_ATTR(indirect_dword, S_IWUSR|S_IRUGO, | |
839 | show_indirect_dword,store_indirect_dword); | |
840 | ||
841 | static ssize_t show_indirect_byte(struct device *d, char *buf) | |
842 | { | |
843 | u8 reg = 0; | |
844 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
845 | if (priv->status & STATUS_INDIRECT_BYTE) | |
846 | reg = ipw_read_reg8(priv, priv->indirect_byte); | |
847 | else | |
848 | reg = 0; | |
849 | ||
850 | return sprintf(buf, "0x%02x\n", reg); | |
851 | } | |
852 | static ssize_t store_indirect_byte(struct device *d, | |
853 | const char *buf, | |
854 | size_t count) | |
855 | { | |
856 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
857 | ||
858 | sscanf(buf, "%x", &priv->indirect_byte); | |
859 | priv->status |= STATUS_INDIRECT_BYTE; | |
860 | return strnlen(buf, count); | |
861 | } | |
862 | static DEVICE_ATTR(indirect_byte, S_IWUSR|S_IRUGO, | |
863 | show_indirect_byte, store_indirect_byte); | |
864 | ||
865 | static ssize_t show_direct_dword(struct device *d, char *buf) | |
866 | { | |
867 | u32 reg = 0; | |
868 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
869 | ||
870 | if (priv->status & STATUS_DIRECT_DWORD) | |
871 | reg = ipw_read32(priv, priv->direct_dword); | |
872 | else | |
873 | reg = 0; | |
874 | ||
875 | return sprintf(buf, "0x%08x\n", reg); | |
876 | } | |
877 | static ssize_t store_direct_dword(struct device *d, | |
878 | const char *buf, | |
879 | size_t count) | |
880 | { | |
881 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
882 | ||
883 | sscanf(buf, "%x", &priv->direct_dword); | |
884 | priv->status |= STATUS_DIRECT_DWORD; | |
885 | return strnlen(buf, count); | |
886 | } | |
887 | static DEVICE_ATTR(direct_dword, S_IWUSR|S_IRUGO, | |
888 | show_direct_dword,store_direct_dword); | |
889 | ||
890 | ||
891 | static inline int rf_kill_active(struct ipw_priv *priv) | |
892 | { | |
893 | if (0 == (ipw_read32(priv, 0x30) & 0x10000)) | |
894 | priv->status |= STATUS_RF_KILL_HW; | |
895 | else | |
896 | priv->status &= ~STATUS_RF_KILL_HW; | |
897 | ||
898 | return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0; | |
899 | } | |
900 | ||
901 | static ssize_t show_rf_kill(struct device *d, char *buf) | |
902 | { | |
903 | /* 0 - RF kill not enabled | |
904 | 1 - SW based RF kill active (sysfs) | |
905 | 2 - HW based RF kill active | |
906 | 3 - Both HW and SW baed RF kill active */ | |
907 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
908 | int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) | | |
909 | (rf_kill_active(priv) ? 0x2 : 0x0); | |
910 | return sprintf(buf, "%i\n", val); | |
911 | } | |
912 | ||
913 | static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) | |
914 | { | |
915 | if ((disable_radio ? 1 : 0) == | |
916 | (priv->status & STATUS_RF_KILL_SW ? 1 : 0)) | |
917 | return 0 ; | |
918 | ||
919 | IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", | |
920 | disable_radio ? "OFF" : "ON"); | |
921 | ||
922 | if (disable_radio) { | |
923 | priv->status |= STATUS_RF_KILL_SW; | |
924 | ||
925 | if (priv->workqueue) { | |
926 | cancel_delayed_work(&priv->request_scan); | |
927 | } | |
928 | wake_up_interruptible(&priv->wait_command_queue); | |
929 | queue_work(priv->workqueue, &priv->down); | |
930 | } else { | |
931 | priv->status &= ~STATUS_RF_KILL_SW; | |
932 | if (rf_kill_active(priv)) { | |
933 | IPW_DEBUG_RF_KILL("Can not turn radio back on - " | |
934 | "disabled by HW switch\n"); | |
935 | /* Make sure the RF_KILL check timer is running */ | |
936 | cancel_delayed_work(&priv->rf_kill); | |
937 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | |
938 | 2 * HZ); | |
939 | } else | |
940 | queue_work(priv->workqueue, &priv->up); | |
941 | } | |
942 | ||
943 | return 1; | |
944 | } | |
945 | ||
946 | static ssize_t store_rf_kill(struct device *d, const char *buf, size_t count) | |
947 | { | |
948 | struct ipw_priv *priv = (struct ipw_priv *)d->driver_data; | |
949 | ||
950 | ipw_radio_kill_sw(priv, buf[0] == '1'); | |
951 | ||
952 | return count; | |
953 | } | |
954 | static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill); | |
955 | ||
956 | static void ipw_irq_tasklet(struct ipw_priv *priv) | |
957 | { | |
958 | u32 inta, inta_mask, handled = 0; | |
959 | unsigned long flags; | |
960 | int rc = 0; | |
961 | ||
962 | spin_lock_irqsave(&priv->lock, flags); | |
963 | ||
964 | inta = ipw_read32(priv, CX2_INTA_RW); | |
965 | inta_mask = ipw_read32(priv, CX2_INTA_MASK_R); | |
966 | inta &= (CX2_INTA_MASK_ALL & inta_mask); | |
967 | ||
968 | /* Add any cached INTA values that need to be handled */ | |
969 | inta |= priv->isr_inta; | |
970 | ||
971 | /* handle all the justifications for the interrupt */ | |
972 | if (inta & CX2_INTA_BIT_RX_TRANSFER) { | |
973 | ipw_rx(priv); | |
974 | handled |= CX2_INTA_BIT_RX_TRANSFER; | |
975 | } | |
976 | ||
977 | if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) { | |
978 | IPW_DEBUG_HC("Command completed.\n"); | |
979 | rc = ipw_queue_tx_reclaim( priv, &priv->txq_cmd, -1); | |
980 | priv->status &= ~STATUS_HCMD_ACTIVE; | |
981 | wake_up_interruptible(&priv->wait_command_queue); | |
982 | handled |= CX2_INTA_BIT_TX_CMD_QUEUE; | |
983 | } | |
984 | ||
985 | if (inta & CX2_INTA_BIT_TX_QUEUE_1) { | |
986 | IPW_DEBUG_TX("TX_QUEUE_1\n"); | |
987 | rc = ipw_queue_tx_reclaim( priv, &priv->txq[0], 0); | |
988 | handled |= CX2_INTA_BIT_TX_QUEUE_1; | |
989 | } | |
990 | ||
991 | if (inta & CX2_INTA_BIT_TX_QUEUE_2) { | |
992 | IPW_DEBUG_TX("TX_QUEUE_2\n"); | |
993 | rc = ipw_queue_tx_reclaim( priv, &priv->txq[1], 1); | |
994 | handled |= CX2_INTA_BIT_TX_QUEUE_2; | |
995 | } | |
996 | ||
997 | if (inta & CX2_INTA_BIT_TX_QUEUE_3) { | |
998 | IPW_DEBUG_TX("TX_QUEUE_3\n"); | |
999 | rc = ipw_queue_tx_reclaim( priv, &priv->txq[2], 2); | |
1000 | handled |= CX2_INTA_BIT_TX_QUEUE_3; | |
1001 | } | |
1002 | ||
1003 | if (inta & CX2_INTA_BIT_TX_QUEUE_4) { | |
1004 | IPW_DEBUG_TX("TX_QUEUE_4\n"); | |
1005 | rc = ipw_queue_tx_reclaim( priv, &priv->txq[3], 3); | |
1006 | handled |= CX2_INTA_BIT_TX_QUEUE_4; | |
1007 | } | |
1008 | ||
1009 | if (inta & CX2_INTA_BIT_STATUS_CHANGE) { | |
1010 | IPW_WARNING("STATUS_CHANGE\n"); | |
1011 | handled |= CX2_INTA_BIT_STATUS_CHANGE; | |
1012 | } | |
1013 | ||
1014 | if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) { | |
1015 | IPW_WARNING("TX_PERIOD_EXPIRED\n"); | |
1016 | handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED; | |
1017 | } | |
1018 | ||
1019 | if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) { | |
1020 | IPW_WARNING("HOST_CMD_DONE\n"); | |
1021 | handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE; | |
1022 | } | |
1023 | ||
1024 | if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) { | |
1025 | IPW_WARNING("FW_INITIALIZATION_DONE\n"); | |
1026 | handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE; | |
1027 | } | |
1028 | ||
1029 | if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) { | |
1030 | IPW_WARNING("PHY_OFF_DONE\n"); | |
1031 | handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE; | |
1032 | } | |
1033 | ||
1034 | if (inta & CX2_INTA_BIT_RF_KILL_DONE) { | |
1035 | IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); | |
1036 | priv->status |= STATUS_RF_KILL_HW; | |
1037 | wake_up_interruptible(&priv->wait_command_queue); | |
1038 | netif_carrier_off(priv->net_dev); | |
1039 | netif_stop_queue(priv->net_dev); | |
1040 | cancel_delayed_work(&priv->request_scan); | |
1041 | queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); | |
1042 | handled |= CX2_INTA_BIT_RF_KILL_DONE; | |
1043 | } | |
1044 | ||
1045 | if (inta & CX2_INTA_BIT_FATAL_ERROR) { | |
1046 | IPW_ERROR("Firmware error detected. Restarting.\n"); | |
1047 | #ifdef CONFIG_IPW_DEBUG | |
1048 | if (ipw_debug_level & IPW_DL_FW_ERRORS) { | |
1049 | ipw_dump_nic_error_log(priv); | |
1050 | ipw_dump_nic_event_log(priv); | |
1051 | } | |
1052 | #endif | |
1053 | queue_work(priv->workqueue, &priv->adapter_restart); | |
1054 | handled |= CX2_INTA_BIT_FATAL_ERROR; | |
1055 | } | |
1056 | ||
1057 | if (inta & CX2_INTA_BIT_PARITY_ERROR) { | |
1058 | IPW_ERROR("Parity error\n"); | |
1059 | handled |= CX2_INTA_BIT_PARITY_ERROR; | |
1060 | } | |
1061 | ||
1062 | if (handled != inta) { | |
1063 | IPW_ERROR("Unhandled INTA bits 0x%08x\n", | |
1064 | inta & ~handled); | |
1065 | } | |
1066 | ||
1067 | /* enable all interrupts */ | |
1068 | ipw_enable_interrupts(priv); | |
1069 | ||
1070 | spin_unlock_irqrestore(&priv->lock, flags); | |
1071 | } | |
1072 | ||
1073 | #ifdef CONFIG_IPW_DEBUG | |
1074 | #define IPW_CMD(x) case IPW_CMD_ ## x : return #x | |
1075 | static char *get_cmd_string(u8 cmd) | |
1076 | { | |
1077 | switch (cmd) { | |
1078 | IPW_CMD(HOST_COMPLETE); | |
1079 | IPW_CMD(POWER_DOWN); | |
1080 | IPW_CMD(SYSTEM_CONFIG); | |
1081 | IPW_CMD(MULTICAST_ADDRESS); | |
1082 | IPW_CMD(SSID); | |
1083 | IPW_CMD(ADAPTER_ADDRESS); | |
1084 | IPW_CMD(PORT_TYPE); | |
1085 | IPW_CMD(RTS_THRESHOLD); | |
1086 | IPW_CMD(FRAG_THRESHOLD); | |
1087 | IPW_CMD(POWER_MODE); | |
1088 | IPW_CMD(WEP_KEY); | |
1089 | IPW_CMD(TGI_TX_KEY); | |
1090 | IPW_CMD(SCAN_REQUEST); | |
1091 | IPW_CMD(SCAN_REQUEST_EXT); | |
1092 | IPW_CMD(ASSOCIATE); | |
1093 | IPW_CMD(SUPPORTED_RATES); | |
1094 | IPW_CMD(SCAN_ABORT); | |
1095 | IPW_CMD(TX_FLUSH); | |
1096 | IPW_CMD(QOS_PARAMETERS); | |
1097 | IPW_CMD(DINO_CONFIG); | |
1098 | IPW_CMD(RSN_CAPABILITIES); | |
1099 | IPW_CMD(RX_KEY); | |
1100 | IPW_CMD(CARD_DISABLE); | |
1101 | IPW_CMD(SEED_NUMBER); | |
1102 | IPW_CMD(TX_POWER); | |
1103 | IPW_CMD(COUNTRY_INFO); | |
1104 | IPW_CMD(AIRONET_INFO); | |
1105 | IPW_CMD(AP_TX_POWER); | |
1106 | IPW_CMD(CCKM_INFO); | |
1107 | IPW_CMD(CCX_VER_INFO); | |
1108 | IPW_CMD(SET_CALIBRATION); | |
1109 | IPW_CMD(SENSITIVITY_CALIB); | |
1110 | IPW_CMD(RETRY_LIMIT); | |
1111 | IPW_CMD(IPW_PRE_POWER_DOWN); | |
1112 | IPW_CMD(VAP_BEACON_TEMPLATE); | |
1113 | IPW_CMD(VAP_DTIM_PERIOD); | |
1114 | IPW_CMD(EXT_SUPPORTED_RATES); | |
1115 | IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT); | |
1116 | IPW_CMD(VAP_QUIET_INTERVALS); | |
1117 | IPW_CMD(VAP_CHANNEL_SWITCH); | |
1118 | IPW_CMD(VAP_MANDATORY_CHANNELS); | |
1119 | IPW_CMD(VAP_CELL_PWR_LIMIT); | |
1120 | IPW_CMD(VAP_CF_PARAM_SET); | |
1121 | IPW_CMD(VAP_SET_BEACONING_STATE); | |
1122 | IPW_CMD(MEASUREMENT); | |
1123 | IPW_CMD(POWER_CAPABILITY); | |
1124 | IPW_CMD(SUPPORTED_CHANNELS); | |
1125 | IPW_CMD(TPC_REPORT); | |
1126 | IPW_CMD(WME_INFO); | |
1127 | IPW_CMD(PRODUCTION_COMMAND); | |
1128 | default: | |
1129 | return "UNKNOWN"; | |
1130 | } | |
1131 | } | |
1132 | #endif /* CONFIG_IPW_DEBUG */ | |
1133 | ||
1134 | #define HOST_COMPLETE_TIMEOUT HZ | |
1135 | static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) | |
1136 | { | |
1137 | int rc = 0; | |
1138 | ||
1139 | if (priv->status & STATUS_HCMD_ACTIVE) { | |
1140 | IPW_ERROR("Already sending a command\n"); | |
1141 | return -1; | |
1142 | } | |
1143 | ||
1144 | priv->status |= STATUS_HCMD_ACTIVE; | |
1145 | ||
1146 | IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n", | |
1147 | get_cmd_string(cmd->cmd), cmd->cmd, cmd->len); | |
1148 | printk_buf(IPW_DL_HOST_COMMAND, (u8*)cmd->param, cmd->len); | |
1149 | ||
1150 | rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0); | |
1151 | if (rc) | |
1152 | return rc; | |
1153 | ||
1154 | rc = wait_event_interruptible_timeout( | |
1155 | priv->wait_command_queue, !(priv->status & STATUS_HCMD_ACTIVE), | |
1156 | HOST_COMPLETE_TIMEOUT); | |
1157 | if (rc == 0) { | |
1158 | IPW_DEBUG_INFO("Command completion failed out after %dms.\n", | |
1159 | HOST_COMPLETE_TIMEOUT / (HZ / 1000)); | |
1160 | priv->status &= ~STATUS_HCMD_ACTIVE; | |
1161 | return -EIO; | |
1162 | } | |
1163 | if (priv->status & STATUS_RF_KILL_MASK) { | |
1164 | IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n"); | |
1165 | return -EIO; | |
1166 | } | |
1167 | ||
1168 | return 0; | |
1169 | } | |
1170 | ||
1171 | static int ipw_send_host_complete(struct ipw_priv *priv) | |
1172 | { | |
1173 | struct host_cmd cmd = { | |
1174 | .cmd = IPW_CMD_HOST_COMPLETE, | |
1175 | .len = 0 | |
1176 | }; | |
1177 | ||
1178 | if (!priv) { | |
1179 | IPW_ERROR("Invalid args\n"); | |
1180 | return -1; | |
1181 | } | |
1182 | ||
1183 | if (ipw_send_cmd(priv, &cmd)) { | |
1184 | IPW_ERROR("failed to send HOST_COMPLETE command\n"); | |
1185 | return -1; | |
1186 | } | |
1187 | ||
1188 | return 0; | |
1189 | } | |
1190 | ||
1191 | static int ipw_send_system_config(struct ipw_priv *priv, | |
1192 | struct ipw_sys_config *config) | |
1193 | { | |
1194 | struct host_cmd cmd = { | |
1195 | .cmd = IPW_CMD_SYSTEM_CONFIG, | |
1196 | .len = sizeof(*config) | |
1197 | }; | |
1198 | ||
1199 | if (!priv || !config) { | |
1200 | IPW_ERROR("Invalid args\n"); | |
1201 | return -1; | |
1202 | } | |
1203 | ||
1204 | memcpy(&cmd.param,config,sizeof(*config)); | |
1205 | if (ipw_send_cmd(priv, &cmd)) { | |
1206 | IPW_ERROR("failed to send SYSTEM_CONFIG command\n"); | |
1207 | return -1; | |
1208 | } | |
1209 | ||
1210 | return 0; | |
1211 | } | |
1212 | ||
1213 | static int ipw_send_ssid(struct ipw_priv *priv, u8 *ssid, int len) | |
1214 | { | |
1215 | struct host_cmd cmd = { | |
1216 | .cmd = IPW_CMD_SSID, | |
1217 | .len = min(len, IW_ESSID_MAX_SIZE) | |
1218 | }; | |
1219 | ||
1220 | if (!priv || !ssid) { | |
1221 | IPW_ERROR("Invalid args\n"); | |
1222 | return -1; | |
1223 | } | |
1224 | ||
1225 | memcpy(&cmd.param, ssid, cmd.len); | |
1226 | if (ipw_send_cmd(priv, &cmd)) { | |
1227 | IPW_ERROR("failed to send SSID command\n"); | |
1228 | return -1; | |
1229 | } | |
1230 | ||
1231 | return 0; | |
1232 | } | |
1233 | ||
1234 | static int ipw_send_adapter_address(struct ipw_priv *priv, u8 *mac) | |
1235 | { | |
1236 | struct host_cmd cmd = { | |
1237 | .cmd = IPW_CMD_ADAPTER_ADDRESS, | |
1238 | .len = ETH_ALEN | |
1239 | }; | |
1240 | ||
1241 | if (!priv || !mac) { | |
1242 | IPW_ERROR("Invalid args\n"); | |
1243 | return -1; | |
1244 | } | |
1245 | ||
1246 | IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", | |
1247 | priv->net_dev->name, MAC_ARG(mac)); | |
1248 | ||
1249 | memcpy(&cmd.param, mac, ETH_ALEN); | |
1250 | ||
1251 | if (ipw_send_cmd(priv, &cmd)) { | |
1252 | IPW_ERROR("failed to send ADAPTER_ADDRESS command\n"); | |
1253 | return -1; | |
1254 | } | |
1255 | ||
1256 | return 0; | |
1257 | } | |
1258 | ||
1259 | static void ipw_adapter_restart(void *adapter) | |
1260 | { | |
1261 | struct ipw_priv *priv = adapter; | |
1262 | ||
1263 | if (priv->status & STATUS_RF_KILL_MASK) | |
1264 | return; | |
1265 | ||
1266 | ipw_down(priv); | |
1267 | if (ipw_up(priv)) { | |
1268 | IPW_ERROR("Failed to up device\n"); | |
1269 | return; | |
1270 | } | |
1271 | } | |
1272 | ||
1273 | ||
1274 | ||
1275 | ||
1276 | #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) | |
1277 | ||
1278 | static void ipw_scan_check(void *data) | |
1279 | { | |
1280 | struct ipw_priv *priv = data; | |
1281 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { | |
1282 | IPW_DEBUG_SCAN("Scan completion watchdog resetting " | |
1283 | "adapter (%dms).\n", | |
1284 | IPW_SCAN_CHECK_WATCHDOG / 100); | |
1285 | ipw_adapter_restart(priv); | |
1286 | } | |
1287 | } | |
1288 | ||
1289 | static int ipw_send_scan_request_ext(struct ipw_priv *priv, | |
1290 | struct ipw_scan_request_ext *request) | |
1291 | { | |
1292 | struct host_cmd cmd = { | |
1293 | .cmd = IPW_CMD_SCAN_REQUEST_EXT, | |
1294 | .len = sizeof(*request) | |
1295 | }; | |
1296 | ||
1297 | if (!priv || !request) { | |
1298 | IPW_ERROR("Invalid args\n"); | |
1299 | return -1; | |
1300 | } | |
1301 | ||
1302 | memcpy(&cmd.param,request,sizeof(*request)); | |
1303 | if (ipw_send_cmd(priv, &cmd)) { | |
1304 | IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n"); | |
1305 | return -1; | |
1306 | } | |
1307 | ||
1308 | queue_delayed_work(priv->workqueue, &priv->scan_check, | |
1309 | IPW_SCAN_CHECK_WATCHDOG); | |
1310 | return 0; | |
1311 | } | |
1312 | ||
1313 | static int ipw_send_scan_abort(struct ipw_priv *priv) | |
1314 | { | |
1315 | struct host_cmd cmd = { | |
1316 | .cmd = IPW_CMD_SCAN_ABORT, | |
1317 | .len = 0 | |
1318 | }; | |
1319 | ||
1320 | if (!priv) { | |
1321 | IPW_ERROR("Invalid args\n"); | |
1322 | return -1; | |
1323 | } | |
1324 | ||
1325 | if (ipw_send_cmd(priv, &cmd)) { | |
1326 | IPW_ERROR("failed to send SCAN_ABORT command\n"); | |
1327 | return -1; | |
1328 | } | |
1329 | ||
1330 | return 0; | |
1331 | } | |
1332 | ||
1333 | static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) | |
1334 | { | |
1335 | struct host_cmd cmd = { | |
1336 | .cmd = IPW_CMD_SENSITIVITY_CALIB, | |
1337 | .len = sizeof(struct ipw_sensitivity_calib) | |
1338 | }; | |
1339 | struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *) | |
1340 | &cmd.param; | |
1341 | calib->beacon_rssi_raw = sens; | |
1342 | if (ipw_send_cmd(priv, &cmd)) { | |
1343 | IPW_ERROR("failed to send SENSITIVITY CALIB command\n"); | |
1344 | return -1; | |
1345 | } | |
1346 | ||
1347 | return 0; | |
1348 | } | |
1349 | ||
1350 | static int ipw_send_associate(struct ipw_priv *priv, | |
1351 | struct ipw_associate *associate) | |
1352 | { | |
1353 | struct host_cmd cmd = { | |
1354 | .cmd = IPW_CMD_ASSOCIATE, | |
1355 | .len = sizeof(*associate) | |
1356 | }; | |
1357 | ||
1358 | if (!priv || !associate) { | |
1359 | IPW_ERROR("Invalid args\n"); | |
1360 | return -1; | |
1361 | } | |
1362 | ||
1363 | memcpy(&cmd.param,associate,sizeof(*associate)); | |
1364 | if (ipw_send_cmd(priv, &cmd)) { | |
1365 | IPW_ERROR("failed to send ASSOCIATE command\n"); | |
1366 | return -1; | |
1367 | } | |
1368 | ||
1369 | return 0; | |
1370 | } | |
1371 | ||
1372 | static int ipw_send_supported_rates(struct ipw_priv *priv, | |
1373 | struct ipw_supported_rates *rates) | |
1374 | { | |
1375 | struct host_cmd cmd = { | |
1376 | .cmd = IPW_CMD_SUPPORTED_RATES, | |
1377 | .len = sizeof(*rates) | |
1378 | }; | |
1379 | ||
1380 | if (!priv || !rates) { | |
1381 | IPW_ERROR("Invalid args\n"); | |
1382 | return -1; | |
1383 | } | |
1384 | ||
1385 | memcpy(&cmd.param,rates,sizeof(*rates)); | |
1386 | if (ipw_send_cmd(priv, &cmd)) { | |
1387 | IPW_ERROR("failed to send SUPPORTED_RATES command\n"); | |
1388 | return -1; | |
1389 | } | |
1390 | ||
1391 | return 0; | |
1392 | } | |
1393 | ||
1394 | static int ipw_set_random_seed(struct ipw_priv *priv) | |
1395 | { | |
1396 | struct host_cmd cmd = { | |
1397 | .cmd = IPW_CMD_SEED_NUMBER, | |
1398 | .len = sizeof(u32) | |
1399 | }; | |
1400 | ||
1401 | if (!priv) { | |
1402 | IPW_ERROR("Invalid args\n"); | |
1403 | return -1; | |
1404 | } | |
1405 | ||
1406 | get_random_bytes(&cmd.param, sizeof(u32)); | |
1407 | ||
1408 | if (ipw_send_cmd(priv, &cmd)) { | |
1409 | IPW_ERROR("failed to send SEED_NUMBER command\n"); | |
1410 | return -1; | |
1411 | } | |
1412 | ||
1413 | return 0; | |
1414 | } | |
1415 | ||
1416 | #if 0 | |
1417 | static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) | |
1418 | { | |
1419 | struct host_cmd cmd = { | |
1420 | .cmd = IPW_CMD_CARD_DISABLE, | |
1421 | .len = sizeof(u32) | |
1422 | }; | |
1423 | ||
1424 | if (!priv) { | |
1425 | IPW_ERROR("Invalid args\n"); | |
1426 | return -1; | |
1427 | } | |
1428 | ||
1429 | *((u32*)&cmd.param) = phy_off; | |
1430 | ||
1431 | if (ipw_send_cmd(priv, &cmd)) { | |
1432 | IPW_ERROR("failed to send CARD_DISABLE command\n"); | |
1433 | return -1; | |
1434 | } | |
1435 | ||
1436 | return 0; | |
1437 | } | |
1438 | #endif | |
1439 | ||
1440 | static int ipw_send_tx_power(struct ipw_priv *priv, | |
1441 | struct ipw_tx_power *power) | |
1442 | { | |
1443 | struct host_cmd cmd = { | |
1444 | .cmd = IPW_CMD_TX_POWER, | |
1445 | .len = sizeof(*power) | |
1446 | }; | |
1447 | ||
1448 | if (!priv || !power) { | |
1449 | IPW_ERROR("Invalid args\n"); | |
1450 | return -1; | |
1451 | } | |
1452 | ||
1453 | memcpy(&cmd.param,power,sizeof(*power)); | |
1454 | if (ipw_send_cmd(priv, &cmd)) { | |
1455 | IPW_ERROR("failed to send TX_POWER command\n"); | |
1456 | return -1; | |
1457 | } | |
1458 | ||
1459 | return 0; | |
1460 | } | |
1461 | ||
1462 | static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) | |
1463 | { | |
1464 | struct ipw_rts_threshold rts_threshold = { | |
1465 | .rts_threshold = rts, | |
1466 | }; | |
1467 | struct host_cmd cmd = { | |
1468 | .cmd = IPW_CMD_RTS_THRESHOLD, | |
1469 | .len = sizeof(rts_threshold) | |
1470 | }; | |
1471 | ||
1472 | if (!priv) { | |
1473 | IPW_ERROR("Invalid args\n"); | |
1474 | return -1; | |
1475 | } | |
1476 | ||
1477 | memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold)); | |
1478 | if (ipw_send_cmd(priv, &cmd)) { | |
1479 | IPW_ERROR("failed to send RTS_THRESHOLD command\n"); | |
1480 | return -1; | |
1481 | } | |
1482 | ||
1483 | return 0; | |
1484 | } | |
1485 | ||
1486 | static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) | |
1487 | { | |
1488 | struct ipw_frag_threshold frag_threshold = { | |
1489 | .frag_threshold = frag, | |
1490 | }; | |
1491 | struct host_cmd cmd = { | |
1492 | .cmd = IPW_CMD_FRAG_THRESHOLD, | |
1493 | .len = sizeof(frag_threshold) | |
1494 | }; | |
1495 | ||
1496 | if (!priv) { | |
1497 | IPW_ERROR("Invalid args\n"); | |
1498 | return -1; | |
1499 | } | |
1500 | ||
1501 | memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold)); | |
1502 | if (ipw_send_cmd(priv, &cmd)) { | |
1503 | IPW_ERROR("failed to send FRAG_THRESHOLD command\n"); | |
1504 | return -1; | |
1505 | } | |
1506 | ||
1507 | return 0; | |
1508 | } | |
1509 | ||
1510 | static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) | |
1511 | { | |
1512 | struct host_cmd cmd = { | |
1513 | .cmd = IPW_CMD_POWER_MODE, | |
1514 | .len = sizeof(u32) | |
1515 | }; | |
1516 | u32 *param = (u32*)(&cmd.param); | |
1517 | ||
1518 | if (!priv) { | |
1519 | IPW_ERROR("Invalid args\n"); | |
1520 | return -1; | |
1521 | } | |
1522 | ||
1523 | /* If on battery, set to 3, if AC set to CAM, else user | |
1524 | * level */ | |
1525 | switch (mode) { | |
1526 | case IPW_POWER_BATTERY: | |
1527 | *param = IPW_POWER_INDEX_3; | |
1528 | break; | |
1529 | case IPW_POWER_AC: | |
1530 | *param = IPW_POWER_MODE_CAM; | |
1531 | break; | |
1532 | default: | |
1533 | *param = mode; | |
1534 | break; | |
1535 | } | |
1536 | ||
1537 | if (ipw_send_cmd(priv, &cmd)) { | |
1538 | IPW_ERROR("failed to send POWER_MODE command\n"); | |
1539 | return -1; | |
1540 | } | |
1541 | ||
1542 | return 0; | |
1543 | } | |
1544 | ||
1545 | /* | |
1546 | * The IPW device contains a Microwire compatible EEPROM that stores | |
1547 | * various data like the MAC address. Usually the firmware has exclusive | |
1548 | * access to the eeprom, but during device initialization (before the | |
1549 | * device driver has sent the HostComplete command to the firmware) the | |
1550 | * device driver has read access to the EEPROM by way of indirect addressing | |
1551 | * through a couple of memory mapped registers. | |
1552 | * | |
1553 | * The following is a simplified implementation for pulling data out of the | |
1554 | * the eeprom, along with some helper functions to find information in | |
1555 | * the per device private data's copy of the eeprom. | |
1556 | * | |
1557 | * NOTE: To better understand how these functions work (i.e what is a chip | |
1558 | * select and why do have to keep driving the eeprom clock?), read | |
1559 | * just about any data sheet for a Microwire compatible EEPROM. | |
1560 | */ | |
1561 | ||
1562 | /* write a 32 bit value into the indirect accessor register */ | |
1563 | static inline void eeprom_write_reg(struct ipw_priv *p, u32 data) | |
1564 | { | |
1565 | ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data); | |
1566 | ||
1567 | /* the eeprom requires some time to complete the operation */ | |
1568 | udelay(p->eeprom_delay); | |
1569 | ||
1570 | return; | |
1571 | } | |
1572 | ||
1573 | /* perform a chip select operation */ | |
1574 | static inline void eeprom_cs(struct ipw_priv* priv) | |
1575 | { | |
1576 | eeprom_write_reg(priv,0); | |
1577 | eeprom_write_reg(priv,EEPROM_BIT_CS); | |
1578 | eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK); | |
1579 | eeprom_write_reg(priv,EEPROM_BIT_CS); | |
1580 | } | |
1581 | ||
1582 | /* perform a chip select operation */ | |
1583 | static inline void eeprom_disable_cs(struct ipw_priv* priv) | |
1584 | { | |
1585 | eeprom_write_reg(priv,EEPROM_BIT_CS); | |
1586 | eeprom_write_reg(priv,0); | |
1587 | eeprom_write_reg(priv,EEPROM_BIT_SK); | |
1588 | } | |
1589 | ||
1590 | /* push a single bit down to the eeprom */ | |
1591 | static inline void eeprom_write_bit(struct ipw_priv *p,u8 bit) | |
1592 | { | |
1593 | int d = ( bit ? EEPROM_BIT_DI : 0); | |
1594 | eeprom_write_reg(p,EEPROM_BIT_CS|d); | |
1595 | eeprom_write_reg(p,EEPROM_BIT_CS|d|EEPROM_BIT_SK); | |
1596 | } | |
1597 | ||
1598 | /* push an opcode followed by an address down to the eeprom */ | |
1599 | static void eeprom_op(struct ipw_priv* priv, u8 op, u8 addr) | |
1600 | { | |
1601 | int i; | |
1602 | ||
1603 | eeprom_cs(priv); | |
1604 | eeprom_write_bit(priv,1); | |
1605 | eeprom_write_bit(priv,op&2); | |
1606 | eeprom_write_bit(priv,op&1); | |
1607 | for ( i=7; i>=0; i-- ) { | |
1608 | eeprom_write_bit(priv,addr&(1<<i)); | |
1609 | } | |
1610 | } | |
1611 | ||
1612 | /* pull 16 bits off the eeprom, one bit at a time */ | |
1613 | static u16 eeprom_read_u16(struct ipw_priv* priv, u8 addr) | |
1614 | { | |
1615 | int i; | |
1616 | u16 r=0; | |
1617 | ||
1618 | /* Send READ Opcode */ | |
1619 | eeprom_op(priv,EEPROM_CMD_READ,addr); | |
1620 | ||
1621 | /* Send dummy bit */ | |
1622 | eeprom_write_reg(priv,EEPROM_BIT_CS); | |
1623 | ||
1624 | /* Read the byte off the eeprom one bit at a time */ | |
1625 | for ( i=0; i<16; i++ ) { | |
1626 | u32 data = 0; | |
1627 | eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK); | |
1628 | eeprom_write_reg(priv,EEPROM_BIT_CS); | |
1629 | data = ipw_read_reg32(priv,FW_MEM_REG_EEPROM_ACCESS); | |
1630 | r = (r<<1) | ((data & EEPROM_BIT_DO)?1:0); | |
1631 | } | |
1632 | ||
1633 | /* Send another dummy bit */ | |
1634 | eeprom_write_reg(priv,0); | |
1635 | eeprom_disable_cs(priv); | |
1636 | ||
1637 | return r; | |
1638 | } | |
1639 | ||
1640 | /* helper function for pulling the mac address out of the private */ | |
1641 | /* data's copy of the eeprom data */ | |
1642 | static void eeprom_parse_mac(struct ipw_priv* priv, u8* mac) | |
1643 | { | |
1644 | u8* ee = (u8*)priv->eeprom; | |
1645 | memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6); | |
1646 | } | |
1647 | ||
1648 | /* | |
1649 | * Either the device driver (i.e. the host) or the firmware can | |
1650 | * load eeprom data into the designated region in SRAM. If neither | |
1651 | * happens then the FW will shutdown with a fatal error. | |
1652 | * | |
1653 | * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE | |
1654 | * bit needs region of shared SRAM needs to be non-zero. | |
1655 | */ | |
1656 | static void ipw_eeprom_init_sram(struct ipw_priv *priv) | |
1657 | { | |
1658 | int i; | |
1659 | u16 *eeprom = (u16 *)priv->eeprom; | |
1660 | ||
1661 | IPW_DEBUG_TRACE(">>\n"); | |
1662 | ||
1663 | /* read entire contents of eeprom into private buffer */ | |
1664 | for ( i=0; i<128; i++ ) | |
1665 | eeprom[i] = eeprom_read_u16(priv,(u8)i); | |
1666 | ||
1667 | /* | |
1668 | If the data looks correct, then copy it to our private | |
1669 | copy. Otherwise let the firmware know to perform the operation | |
1670 | on it's own | |
1671 | */ | |
1672 | if ((priv->eeprom + EEPROM_VERSION) != 0) { | |
1673 | IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); | |
1674 | ||
1675 | /* write the eeprom data to sram */ | |
1676 | for( i=0; i<CX2_EEPROM_IMAGE_SIZE; i++ ) | |
1677 | ipw_write8(priv, IPW_EEPROM_DATA + i, | |
1678 | priv->eeprom[i]); | |
1679 | ||
1680 | /* Do not load eeprom data on fatal error or suspend */ | |
1681 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); | |
1682 | } else { | |
1683 | IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n"); | |
1684 | ||
1685 | /* Load eeprom data on fatal error or suspend */ | |
1686 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1); | |
1687 | } | |
1688 | ||
1689 | IPW_DEBUG_TRACE("<<\n"); | |
1690 | } | |
1691 | ||
1692 | ||
1693 | static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) | |
1694 | { | |
1695 | count >>= 2; | |
1696 | if (!count) return; | |
1697 | _ipw_write32(priv, CX2_AUTOINC_ADDR, start); | |
1698 | while (count--) | |
1699 | _ipw_write32(priv, CX2_AUTOINC_DATA, 0); | |
1700 | } | |
1701 | ||
1702 | static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv) | |
1703 | { | |
1704 | ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL, | |
1705 | CB_NUMBER_OF_ELEMENTS_SMALL * | |
1706 | sizeof(struct command_block)); | |
1707 | } | |
1708 | ||
1709 | static int ipw_fw_dma_enable(struct ipw_priv *priv) | |
1710 | { /* start dma engine but no transfers yet*/ | |
1711 | ||
1712 | IPW_DEBUG_FW(">> : \n"); | |
1713 | ||
1714 | /* Start the dma */ | |
1715 | ipw_fw_dma_reset_command_blocks(priv); | |
1716 | ||
1717 | /* Write CB base address */ | |
1718 | ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL); | |
1719 | ||
1720 | IPW_DEBUG_FW("<< : \n"); | |
1721 | return 0; | |
1722 | } | |
1723 | ||
1724 | static void ipw_fw_dma_abort(struct ipw_priv *priv) | |
1725 | { | |
1726 | u32 control = 0; | |
1727 | ||
1728 | IPW_DEBUG_FW(">> :\n"); | |
1729 | ||
1730 | //set the Stop and Abort bit | |
1731 | control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; | |
1732 | ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control); | |
1733 | priv->sram_desc.last_cb_index = 0; | |
1734 | ||
1735 | IPW_DEBUG_FW("<< \n"); | |
1736 | } | |
1737 | ||
1738 | static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, struct command_block *cb) | |
1739 | { | |
1740 | u32 address = CX2_SHARED_SRAM_DMA_CONTROL + (sizeof(struct command_block) * index); | |
1741 | IPW_DEBUG_FW(">> :\n"); | |
1742 | ||
1743 | ipw_write_indirect(priv, address, (u8*)cb, sizeof(struct command_block)); | |
1744 | ||
1745 | IPW_DEBUG_FW("<< :\n"); | |
1746 | return 0; | |
1747 | ||
1748 | } | |
1749 | ||
1750 | static int ipw_fw_dma_kick(struct ipw_priv *priv) | |
1751 | { | |
1752 | u32 control = 0; | |
1753 | u32 index=0; | |
1754 | ||
1755 | IPW_DEBUG_FW(">> :\n"); | |
1756 | ||
1757 | for (index = 0; index < priv->sram_desc.last_cb_index; index++) | |
1758 | ipw_fw_dma_write_command_block(priv, index, &priv->sram_desc.cb_list[index]); | |
1759 | ||
1760 | /* Enable the DMA in the CSR register */ | |
1761 | ipw_clear_bit(priv, CX2_RESET_REG,CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER); | |
1762 | ||
1763 | /* Set the Start bit. */ | |
1764 | control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; | |
1765 | ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control); | |
1766 | ||
1767 | IPW_DEBUG_FW("<< :\n"); | |
1768 | return 0; | |
1769 | } | |
1770 | ||
1771 | static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) | |
1772 | { | |
1773 | u32 address; | |
1774 | u32 register_value=0; | |
1775 | u32 cb_fields_address=0; | |
1776 | ||
1777 | IPW_DEBUG_FW(">> :\n"); | |
1778 | address = ipw_read_reg32(priv,CX2_DMA_I_CURRENT_CB); | |
1779 | IPW_DEBUG_FW_INFO("Current CB is 0x%x \n",address); | |
1780 | ||
1781 | /* Read the DMA Controlor register */ | |
1782 | register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL); | |
1783 | IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n",register_value); | |
1784 | ||
1785 | /* Print the CB values*/ | |
1786 | cb_fields_address = address; | |
1787 | register_value = ipw_read_reg32(priv, cb_fields_address); | |
1788 | IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n",register_value); | |
1789 | ||
1790 | cb_fields_address += sizeof(u32); | |
1791 | register_value = ipw_read_reg32(priv, cb_fields_address); | |
1792 | IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n",register_value); | |
1793 | ||
1794 | cb_fields_address += sizeof(u32); | |
1795 | register_value = ipw_read_reg32(priv, cb_fields_address); | |
1796 | IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n", | |
1797 | register_value); | |
1798 | ||
1799 | cb_fields_address += sizeof(u32); | |
1800 | register_value = ipw_read_reg32(priv, cb_fields_address); | |
1801 | IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n",register_value); | |
1802 | ||
1803 | IPW_DEBUG_FW(">> :\n"); | |
1804 | } | |
1805 | ||
1806 | static int ipw_fw_dma_command_block_index(struct ipw_priv *priv) | |
1807 | { | |
1808 | u32 current_cb_address = 0; | |
1809 | u32 current_cb_index = 0; | |
1810 | ||
1811 | IPW_DEBUG_FW("<< :\n"); | |
1812 | current_cb_address= ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB); | |
1813 | ||
1814 | current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL )/ | |
1815 | sizeof (struct command_block); | |
1816 | ||
1817 | IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", | |
1818 | current_cb_index, current_cb_address ); | |
1819 | ||
1820 | IPW_DEBUG_FW(">> :\n"); | |
1821 | return current_cb_index; | |
1822 | ||
1823 | } | |
1824 | ||
1825 | static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, | |
1826 | u32 src_address, | |
1827 | u32 dest_address, | |
1828 | u32 length, | |
1829 | int interrupt_enabled, | |
1830 | int is_last) | |
1831 | { | |
1832 | ||
1833 | u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | | |
1834 | CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | | |
1835 | CB_DEST_SIZE_LONG; | |
1836 | struct command_block *cb; | |
1837 | u32 last_cb_element=0; | |
1838 | ||
1839 | IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", | |
1840 | src_address, dest_address, length); | |
1841 | ||
1842 | if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL) | |
1843 | return -1; | |
1844 | ||
1845 | last_cb_element = priv->sram_desc.last_cb_index; | |
1846 | cb = &priv->sram_desc.cb_list[last_cb_element]; | |
1847 | priv->sram_desc.last_cb_index++; | |
1848 | ||
1849 | /* Calculate the new CB control word */ | |
1850 | if (interrupt_enabled ) | |
1851 | control |= CB_INT_ENABLED; | |
1852 | ||
1853 | if (is_last) | |
1854 | control |= CB_LAST_VALID; | |
1855 | ||
1856 | control |= length; | |
1857 | ||
1858 | /* Calculate the CB Element's checksum value */ | |
1859 | cb->status = control ^src_address ^dest_address; | |
1860 | ||
1861 | /* Copy the Source and Destination addresses */ | |
1862 | cb->dest_addr = dest_address; | |
1863 | cb->source_addr = src_address; | |
1864 | ||
1865 | /* Copy the Control Word last */ | |
1866 | cb->control = control; | |
1867 | ||
1868 | return 0; | |
1869 | } | |
1870 | ||
1871 | static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, | |
1872 | u32 src_phys, | |
1873 | u32 dest_address, | |
1874 | u32 length) | |
1875 | { | |
1876 | u32 bytes_left = length; | |
1877 | u32 src_offset=0; | |
1878 | u32 dest_offset=0; | |
1879 | int status = 0; | |
1880 | IPW_DEBUG_FW(">> \n"); | |
1881 | IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n", | |
1882 | src_phys, dest_address, length); | |
1883 | while (bytes_left > CB_MAX_LENGTH) { | |
1884 | status = ipw_fw_dma_add_command_block( priv, | |
1885 | src_phys + src_offset, | |
1886 | dest_address + dest_offset, | |
1887 | CB_MAX_LENGTH, 0, 0); | |
1888 | if (status) { | |
1889 | IPW_DEBUG_FW_INFO(": Failed\n"); | |
1890 | return -1; | |
1891 | } else | |
1892 | IPW_DEBUG_FW_INFO(": Added new cb\n"); | |
1893 | ||
1894 | src_offset += CB_MAX_LENGTH; | |
1895 | dest_offset += CB_MAX_LENGTH; | |
1896 | bytes_left -= CB_MAX_LENGTH; | |
1897 | } | |
1898 | ||
1899 | /* add the buffer tail */ | |
1900 | if (bytes_left > 0) { | |
1901 | status = ipw_fw_dma_add_command_block( | |
1902 | priv, src_phys + src_offset, | |
1903 | dest_address + dest_offset, | |
1904 | bytes_left, 0, 0); | |
1905 | if (status) { | |
1906 | IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n"); | |
1907 | return -1; | |
1908 | } else | |
1909 | IPW_DEBUG_FW_INFO(": Adding new cb - the buffer tail\n"); | |
1910 | } | |
1911 | ||
1912 | ||
1913 | IPW_DEBUG_FW("<< \n"); | |
1914 | return 0; | |
1915 | } | |
1916 | ||
1917 | static int ipw_fw_dma_wait(struct ipw_priv *priv) | |
1918 | { | |
1919 | u32 current_index = 0; | |
1920 | u32 watchdog = 0; | |
1921 | ||
1922 | IPW_DEBUG_FW(">> : \n"); | |
1923 | ||
1924 | current_index = ipw_fw_dma_command_block_index(priv); | |
1925 | IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n", | |
1926 | (int) priv->sram_desc.last_cb_index); | |
1927 | ||
1928 | while (current_index < priv->sram_desc.last_cb_index) { | |
1929 | udelay(50); | |
1930 | current_index = ipw_fw_dma_command_block_index(priv); | |
1931 | ||
1932 | watchdog++; | |
1933 | ||
1934 | if (watchdog > 400) { | |
1935 | IPW_DEBUG_FW_INFO("Timeout\n"); | |
1936 | ipw_fw_dma_dump_command_block(priv); | |
1937 | ipw_fw_dma_abort(priv); | |
1938 | return -1; | |
1939 | } | |
1940 | } | |
1941 | ||
1942 | ipw_fw_dma_abort(priv); | |
1943 | ||
1944 | /*Disable the DMA in the CSR register*/ | |
1945 | ipw_set_bit(priv, CX2_RESET_REG, | |
1946 | CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER); | |
1947 | ||
1948 | IPW_DEBUG_FW("<< dmaWaitSync \n"); | |
1949 | return 0; | |
1950 | } | |
1951 | ||
1952 | static void ipw_remove_current_network(struct ipw_priv *priv) | |
1953 | { | |
1954 | struct list_head *element, *safe; | |
1955 | struct ieee80211_network *network = NULL; | |
1956 | list_for_each_safe(element, safe, &priv->ieee->network_list) { | |
1957 | network = list_entry(element, struct ieee80211_network, list); | |
1958 | if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { | |
1959 | list_del(element); | |
1960 | list_add_tail(&network->list, | |
1961 | &priv->ieee->network_free_list); | |
1962 | } | |
1963 | } | |
1964 | } | |
1965 | ||
1966 | /** | |
1967 | * Check that card is still alive. | |
1968 | * Reads debug register from domain0. | |
1969 | * If card is present, pre-defined value should | |
1970 | * be found there. | |
1971 | * | |
1972 | * @param priv | |
1973 | * @return 1 if card is present, 0 otherwise | |
1974 | */ | |
1975 | static inline int ipw_alive(struct ipw_priv *priv) | |
1976 | { | |
1977 | return ipw_read32(priv, 0x90) == 0xd55555d5; | |
1978 | } | |
1979 | ||
1980 | static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, | |
1981 | int timeout) | |
1982 | { | |
1983 | int i = 0; | |
1984 | ||
1985 | do { | |
1986 | if ((ipw_read32(priv, addr) & mask) == mask) | |
1987 | return i; | |
1988 | mdelay(10); | |
1989 | i += 10; | |
1990 | } while (i < timeout); | |
1991 | ||
1992 | return -ETIME; | |
1993 | } | |
1994 | ||
1995 | /* These functions load the firmware and micro code for the operation of | |
1996 | * the ipw hardware. It assumes the buffer has all the bits for the | |
1997 | * image and the caller is handling the memory allocation and clean up. | |
1998 | */ | |
1999 | ||
2000 | ||
2001 | static int ipw_stop_master(struct ipw_priv * priv) | |
2002 | { | |
2003 | int rc; | |
2004 | ||
2005 | IPW_DEBUG_TRACE(">> \n"); | |
2006 | /* stop master. typical delay - 0 */ | |
2007 | ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER); | |
2008 | ||
2009 | rc = ipw_poll_bit(priv, CX2_RESET_REG, | |
2010 | CX2_RESET_REG_MASTER_DISABLED, 100); | |
2011 | if (rc < 0) { | |
2012 | IPW_ERROR("stop master failed in 10ms\n"); | |
2013 | return -1; | |
2014 | } | |
2015 | ||
2016 | IPW_DEBUG_INFO("stop master %dms\n", rc); | |
2017 | ||
2018 | return rc; | |
2019 | } | |
2020 | ||
2021 | static void ipw_arc_release(struct ipw_priv *priv) | |
2022 | { | |
2023 | IPW_DEBUG_TRACE(">> \n"); | |
2024 | mdelay(5); | |
2025 | ||
2026 | ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); | |
2027 | ||
2028 | /* no one knows timing, for safety add some delay */ | |
2029 | mdelay(5); | |
2030 | } | |
2031 | ||
2032 | struct fw_header { | |
2033 | u32 version; | |
2034 | u32 mode; | |
2035 | }; | |
2036 | ||
2037 | struct fw_chunk { | |
2038 | u32 address; | |
2039 | u32 length; | |
2040 | }; | |
2041 | ||
2042 | #define IPW_FW_MAJOR_VERSION 2 | |
2043 | #define IPW_FW_MINOR_VERSION 2 | |
2044 | ||
2045 | #define IPW_FW_MINOR(x) ((x & 0xff) >> 8) | |
2046 | #define IPW_FW_MAJOR(x) (x & 0xff) | |
2047 | ||
2048 | #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \ | |
2049 | IPW_FW_MAJOR_VERSION) | |
2050 | ||
2051 | #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \ | |
2052 | "." __stringify(IPW_FW_MINOR_VERSION) "-" | |
2053 | ||
2054 | #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0 | |
2055 | #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw" | |
2056 | #else | |
2057 | #define IPW_FW_NAME(x) "ipw2200_" x ".fw" | |
2058 | #endif | |
2059 | ||
2060 | static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, | |
2061 | size_t len) | |
2062 | { | |
2063 | int rc = 0, i, addr; | |
2064 | u8 cr = 0; | |
2065 | u16 *image; | |
2066 | ||
2067 | image = (u16 *)data; | |
2068 | ||
2069 | IPW_DEBUG_TRACE(">> \n"); | |
2070 | ||
2071 | rc = ipw_stop_master(priv); | |
2072 | ||
2073 | if (rc < 0) | |
2074 | return rc; | |
2075 | ||
2076 | // spin_lock_irqsave(&priv->lock, flags); | |
2077 | ||
2078 | for (addr = CX2_SHARED_LOWER_BOUND; | |
2079 | addr < CX2_REGISTER_DOMAIN1_END; addr += 4) { | |
2080 | ipw_write32(priv, addr, 0); | |
2081 | } | |
2082 | ||
2083 | /* no ucode (yet) */ | |
2084 | memset(&priv->dino_alive, 0, sizeof(priv->dino_alive)); | |
2085 | /* destroy DMA queues */ | |
2086 | /* reset sequence */ | |
2087 | ||
2088 | ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET ,CX2_BIT_HALT_RESET_ON); | |
2089 | ipw_arc_release(priv); | |
2090 | ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF); | |
2091 | mdelay(1); | |
2092 | ||
2093 | /* reset PHY */ | |
2094 | ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN); | |
2095 | mdelay(1); | |
2096 | ||
2097 | ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0); | |
2098 | mdelay(1); | |
2099 | ||
2100 | /* enable ucode store */ | |
2101 | ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0); | |
2102 | ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS); | |
2103 | mdelay(1); | |
2104 | ||
2105 | /* write ucode */ | |
2106 | /** | |
2107 | * @bug | |
2108 | * Do NOT set indirect address register once and then | |
2109 | * store data to indirect data register in the loop. | |
2110 | * It seems very reasonable, but in this case DINO do not | |
2111 | * accept ucode. It is essential to set address each time. | |
2112 | */ | |
2113 | /* load new ipw uCode */ | |
2114 | for (i = 0; i < len / 2; i++) | |
2115 | ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]); | |
2116 | ||
2117 | ||
2118 | /* enable DINO */ | |
2119 | ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0); | |
2120 | ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, | |
2121 | DINO_ENABLE_SYSTEM ); | |
2122 | ||
2123 | /* this is where the igx / win driver deveates from the VAP driver.*/ | |
2124 | ||
2125 | /* wait for alive response */ | |
2126 | for (i = 0; i < 100; i++) { | |
2127 | /* poll for incoming data */ | |
2128 | cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS); | |
2129 | if (cr & DINO_RXFIFO_DATA) | |
2130 | break; | |
2131 | mdelay(1); | |
2132 | } | |
2133 | ||
2134 | if (cr & DINO_RXFIFO_DATA) { | |
2135 | /* alive_command_responce size is NOT multiple of 4 */ | |
2136 | u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4]; | |
2137 | ||
2138 | for (i = 0; i < ARRAY_SIZE(response_buffer); i++) | |
2139 | response_buffer[i] = | |
2140 | ipw_read_reg32(priv, | |
2141 | CX2_BASEBAND_RX_FIFO_READ); | |
2142 | memcpy(&priv->dino_alive, response_buffer, | |
2143 | sizeof(priv->dino_alive)); | |
2144 | if (priv->dino_alive.alive_command == 1 | |
2145 | && priv->dino_alive.ucode_valid == 1) { | |
2146 | rc = 0; | |
2147 | IPW_DEBUG_INFO( | |
2148 | "Microcode OK, rev. %d (0x%x) dev. %d (0x%x) " | |
2149 | "of %02d/%02d/%02d %02d:%02d\n", | |
2150 | priv->dino_alive.software_revision, | |
2151 | priv->dino_alive.software_revision, | |
2152 | priv->dino_alive.device_identifier, | |
2153 | priv->dino_alive.device_identifier, | |
2154 | priv->dino_alive.time_stamp[0], | |
2155 | priv->dino_alive.time_stamp[1], | |
2156 | priv->dino_alive.time_stamp[2], | |
2157 | priv->dino_alive.time_stamp[3], | |
2158 | priv->dino_alive.time_stamp[4]); | |
2159 | } else { | |
2160 | IPW_DEBUG_INFO("Microcode is not alive\n"); | |
2161 | rc = -EINVAL; | |
2162 | } | |
2163 | } else { | |
2164 | IPW_DEBUG_INFO("No alive response from DINO\n"); | |
2165 | rc = -ETIME; | |
2166 | } | |
2167 | ||
2168 | /* disable DINO, otherwise for some reason | |
2169 | firmware have problem getting alive resp. */ | |
2170 | ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0); | |
2171 | ||
2172 | // spin_unlock_irqrestore(&priv->lock, flags); | |
2173 | ||
2174 | return rc; | |
2175 | } | |
2176 | ||
2177 | static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, | |
2178 | size_t len) | |
2179 | { | |
2180 | int rc = -1; | |
2181 | int offset = 0; | |
2182 | struct fw_chunk *chunk; | |
2183 | dma_addr_t shared_phys; | |
2184 | u8 *shared_virt; | |
2185 | ||
2186 | IPW_DEBUG_TRACE("<< : \n"); | |
2187 | shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys); | |
2188 | ||
2189 | if (!shared_virt) | |
2190 | return -ENOMEM; | |
2191 | ||
2192 | memmove(shared_virt, data, len); | |
2193 | ||
2194 | /* Start the Dma */ | |
2195 | rc = ipw_fw_dma_enable(priv); | |
2196 | ||
2197 | if (priv->sram_desc.last_cb_index > 0) { | |
2198 | /* the DMA is already ready this would be a bug. */ | |
2199 | BUG(); | |
2200 | goto out; | |
2201 | } | |
2202 | ||
2203 | do { | |
2204 | chunk = (struct fw_chunk *)(data + offset); | |
2205 | offset += sizeof(struct fw_chunk); | |
2206 | /* build DMA packet and queue up for sending */ | |
2207 | /* dma to chunk->address, the chunk->length bytes from data + | |
2208 | * offeset*/ | |
2209 | /* Dma loading */ | |
2210 | rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset, | |
2211 | chunk->address, chunk->length); | |
2212 | if (rc) { | |
2213 | IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); | |
2214 | goto out; | |
2215 | } | |
2216 | ||
2217 | offset += chunk->length; | |
2218 | } while (offset < len); | |
2219 | ||
2220 | /* Run the DMA and wait for the answer*/ | |
2221 | rc = ipw_fw_dma_kick(priv); | |
2222 | if (rc) { | |
2223 | IPW_ERROR("dmaKick Failed\n"); | |
2224 | goto out; | |
2225 | } | |
2226 | ||
2227 | rc = ipw_fw_dma_wait(priv); | |
2228 | if (rc) { | |
2229 | IPW_ERROR("dmaWaitSync Failed\n"); | |
2230 | goto out; | |
2231 | } | |
2232 | out: | |
2233 | pci_free_consistent( priv->pci_dev, len, shared_virt, shared_phys); | |
2234 | return rc; | |
2235 | } | |
2236 | ||
2237 | /* stop nic */ | |
2238 | static int ipw_stop_nic(struct ipw_priv *priv) | |
2239 | { | |
2240 | int rc = 0; | |
2241 | ||
2242 | /* stop*/ | |
2243 | ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER); | |
2244 | ||
2245 | rc = ipw_poll_bit(priv, CX2_RESET_REG, | |
2246 | CX2_RESET_REG_MASTER_DISABLED, 500); | |
2247 | if (rc < 0) { | |
2248 | IPW_ERROR("wait for reg master disabled failed\n"); | |
2249 | return rc; | |
2250 | } | |
2251 | ||
2252 | ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); | |
2253 | ||
2254 | return rc; | |
2255 | } | |
2256 | ||
2257 | static void ipw_start_nic(struct ipw_priv *priv) | |
2258 | { | |
2259 | IPW_DEBUG_TRACE(">>\n"); | |
2260 | ||
2261 | /* prvHwStartNic release ARC*/ | |
2262 | ipw_clear_bit(priv, CX2_RESET_REG, | |
2263 | CX2_RESET_REG_MASTER_DISABLED | | |
2264 | CX2_RESET_REG_STOP_MASTER | | |
2265 | CBD_RESET_REG_PRINCETON_RESET); | |
2266 | ||
2267 | /* enable power management */ | |
2268 | ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); | |
2269 | ||
2270 | IPW_DEBUG_TRACE("<<\n"); | |
2271 | } | |
2272 | ||
2273 | static int ipw_init_nic(struct ipw_priv *priv) | |
2274 | { | |
2275 | int rc; | |
2276 | ||
2277 | IPW_DEBUG_TRACE(">>\n"); | |
2278 | /* reset */ | |
2279 | /*prvHwInitNic */ | |
2280 | /* set "initialization complete" bit to move adapter to D0 state */ | |
2281 | ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE); | |
2282 | ||
2283 | /* low-level PLL activation */ | |
2284 | ipw_write32(priv, CX2_READ_INT_REGISTER, CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER); | |
2285 | ||
2286 | /* wait for clock stabilization */ | |
2287 | rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW, | |
2288 | CX2_GP_CNTRL_BIT_CLOCK_READY, 250); | |
2289 | if (rc < 0 ) | |
2290 | IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); | |
2291 | ||
2292 | /* assert SW reset */ | |
2293 | ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET); | |
2294 | ||
2295 | udelay(10); | |
2296 | ||
2297 | /* set "initialization complete" bit to move adapter to D0 state */ | |
2298 | ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE); | |
2299 | ||
2300 | IPW_DEBUG_TRACE(">>\n"); | |
2301 | return 0; | |
2302 | } | |
2303 | ||
2304 | ||
2305 | /* Call this function from process context, it will sleep in request_firmware. | |
2306 | * Probe is an ok place to call this from. | |
2307 | */ | |
2308 | static int ipw_reset_nic(struct ipw_priv *priv) | |
2309 | { | |
2310 | int rc = 0; | |
2311 | ||
2312 | IPW_DEBUG_TRACE(">>\n"); | |
2313 | ||
2314 | rc = ipw_init_nic(priv); | |
2315 | ||
2316 | /* Clear the 'host command active' bit... */ | |
2317 | priv->status &= ~STATUS_HCMD_ACTIVE; | |
2318 | wake_up_interruptible(&priv->wait_command_queue); | |
2319 | ||
2320 | IPW_DEBUG_TRACE("<<\n"); | |
2321 | return rc; | |
2322 | } | |
2323 | ||
2324 | static int ipw_get_fw(struct ipw_priv *priv, | |
2325 | const struct firmware **fw, const char *name) | |
2326 | { | |
2327 | struct fw_header *header; | |
2328 | int rc; | |
2329 | ||
2330 | /* ask firmware_class module to get the boot firmware off disk */ | |
2331 | rc = request_firmware(fw, name, &priv->pci_dev->dev); | |
2332 | if (rc < 0) { | |
2333 | IPW_ERROR("%s load failed: Reason %d\n", name, rc); | |
2334 | return rc; | |
2335 | } | |
2336 | ||
2337 | header = (struct fw_header *)(*fw)->data; | |
2338 | if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) { | |
2339 | IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n", | |
2340 | name, | |
2341 | IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION); | |
2342 | return -EINVAL; | |
2343 | } | |
2344 | ||
2345 | IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%d bytes)\n", | |
2346 | name, | |
2347 | IPW_FW_MAJOR(header->version), | |
2348 | IPW_FW_MINOR(header->version), | |
2349 | (*fw)->size - sizeof(struct fw_header)); | |
2350 | return 0; | |
2351 | } | |
2352 | ||
2353 | #define CX2_RX_BUF_SIZE (3000) | |
2354 | ||
2355 | static inline void ipw_rx_queue_reset(struct ipw_priv *priv, | |
2356 | struct ipw_rx_queue *rxq) | |
2357 | { | |
2358 | unsigned long flags; | |
2359 | int i; | |
2360 | ||
2361 | spin_lock_irqsave(&rxq->lock, flags); | |
2362 | ||
2363 | INIT_LIST_HEAD(&rxq->rx_free); | |
2364 | INIT_LIST_HEAD(&rxq->rx_used); | |
2365 | ||
2366 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | |
2367 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | |
2368 | /* In the reset function, these buffers may have been allocated | |
2369 | * to an SKB, so we need to unmap and free potential storage */ | |
2370 | if (rxq->pool[i].skb != NULL) { | |
2371 | pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, | |
2372 | CX2_RX_BUF_SIZE, | |
2373 | PCI_DMA_FROMDEVICE); | |
2374 | dev_kfree_skb(rxq->pool[i].skb); | |
2375 | } | |
2376 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | |
2377 | } | |
2378 | ||
2379 | /* Set us so that we have processed and used all buffers, but have | |
2380 | * not restocked the Rx queue with fresh buffers */ | |
2381 | rxq->read = rxq->write = 0; | |
2382 | rxq->processed = RX_QUEUE_SIZE - 1; | |
2383 | rxq->free_count = 0; | |
2384 | spin_unlock_irqrestore(&rxq->lock, flags); | |
2385 | } | |
2386 | ||
2387 | #ifdef CONFIG_PM | |
2388 | static int fw_loaded = 0; | |
2389 | static const struct firmware *bootfw = NULL; | |
2390 | static const struct firmware *firmware = NULL; | |
2391 | static const struct firmware *ucode = NULL; | |
2392 | #endif | |
2393 | ||
2394 | static int ipw_load(struct ipw_priv *priv) | |
2395 | { | |
2396 | #ifndef CONFIG_PM | |
2397 | const struct firmware *bootfw = NULL; | |
2398 | const struct firmware *firmware = NULL; | |
2399 | const struct firmware *ucode = NULL; | |
2400 | #endif | |
2401 | int rc = 0, retries = 3; | |
2402 | ||
2403 | #ifdef CONFIG_PM | |
2404 | if (!fw_loaded) { | |
2405 | #endif | |
2406 | rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot")); | |
2407 | if (rc) | |
2408 | goto error; | |
2409 | ||
2410 | switch (priv->ieee->iw_mode) { | |
2411 | case IW_MODE_ADHOC: | |
2412 | rc = ipw_get_fw(priv, &ucode, | |
2413 | IPW_FW_NAME("ibss_ucode")); | |
2414 | if (rc) | |
2415 | goto error; | |
2416 | ||
2417 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss")); | |
2418 | break; | |
2419 | ||
2420 | #ifdef CONFIG_IPW_PROMISC | |
2421 | case IW_MODE_MONITOR: | |
2422 | rc = ipw_get_fw(priv, &ucode, | |
2423 | IPW_FW_NAME("ibss_ucode")); | |
2424 | if (rc) | |
2425 | goto error; | |
2426 | ||
2427 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("sniffer")); | |
2428 | break; | |
2429 | #endif | |
2430 | case IW_MODE_INFRA: | |
2431 | rc = ipw_get_fw(priv, &ucode, | |
2432 | IPW_FW_NAME("bss_ucode")); | |
2433 | if (rc) | |
2434 | goto error; | |
2435 | ||
2436 | rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss")); | |
2437 | break; | |
2438 | ||
2439 | default: | |
2440 | rc = -EINVAL; | |
2441 | } | |
2442 | ||
2443 | if (rc) | |
2444 | goto error; | |
2445 | ||
2446 | #ifdef CONFIG_PM | |
2447 | fw_loaded = 1; | |
2448 | } | |
2449 | #endif | |
2450 | ||
2451 | if (!priv->rxq) | |
2452 | priv->rxq = ipw_rx_queue_alloc(priv); | |
2453 | else | |
2454 | ipw_rx_queue_reset(priv, priv->rxq); | |
2455 | if (!priv->rxq) { | |
2456 | IPW_ERROR("Unable to initialize Rx queue\n"); | |
2457 | goto error; | |
2458 | } | |
2459 | ||
2460 | retry: | |
2461 | /* Ensure interrupts are disabled */ | |
2462 | ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL); | |
2463 | priv->status &= ~STATUS_INT_ENABLED; | |
2464 | ||
2465 | /* ack pending interrupts */ | |
2466 | ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL); | |
2467 | ||
2468 | ipw_stop_nic(priv); | |
2469 | ||
2470 | rc = ipw_reset_nic(priv); | |
2471 | if (rc) { | |
2472 | IPW_ERROR("Unable to reset NIC\n"); | |
2473 | goto error; | |
2474 | } | |
2475 | ||
2476 | ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND, | |
2477 | CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND); | |
2478 | ||
2479 | /* DMA the initial boot firmware into the device */ | |
2480 | rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header), | |
2481 | bootfw->size - sizeof(struct fw_header)); | |
2482 | if (rc < 0) { | |
2483 | IPW_ERROR("Unable to load boot firmware\n"); | |
2484 | goto error; | |
2485 | } | |
2486 | ||
2487 | /* kick start the device */ | |
2488 | ipw_start_nic(priv); | |
2489 | ||
2490 | /* wait for the device to finish it's initial startup sequence */ | |
2491 | rc = ipw_poll_bit(priv, CX2_INTA_RW, | |
2492 | CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500); | |
2493 | if (rc < 0) { | |
2494 | IPW_ERROR("device failed to boot initial fw image\n"); | |
2495 | goto error; | |
2496 | } | |
2497 | IPW_DEBUG_INFO("initial device response after %dms\n", rc); | |
2498 | ||
2499 | /* ack fw init done interrupt */ | |
2500 | ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE); | |
2501 | ||
2502 | /* DMA the ucode into the device */ | |
2503 | rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header), | |
2504 | ucode->size - sizeof(struct fw_header)); | |
2505 | if (rc < 0) { | |
2506 | IPW_ERROR("Unable to load ucode\n"); | |
2507 | goto error; | |
2508 | } | |
2509 | ||
2510 | /* stop nic */ | |
2511 | ipw_stop_nic(priv); | |
2512 | ||
2513 | /* DMA bss firmware into the device */ | |
2514 | rc = ipw_load_firmware(priv, firmware->data + | |
2515 | sizeof(struct fw_header), | |
2516 | firmware->size - sizeof(struct fw_header)); | |
2517 | if (rc < 0 ) { | |
2518 | IPW_ERROR("Unable to load firmware\n"); | |
2519 | goto error; | |
2520 | } | |
2521 | ||
2522 | ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); | |
2523 | ||
2524 | rc = ipw_queue_reset(priv); | |
2525 | if (rc) { | |
2526 | IPW_ERROR("Unable to initialize queues\n"); | |
2527 | goto error; | |
2528 | } | |
2529 | ||
2530 | /* Ensure interrupts are disabled */ | |
2531 | ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL); | |
2532 | ||
2533 | /* kick start the device */ | |
2534 | ipw_start_nic(priv); | |
2535 | ||
2536 | if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) { | |
2537 | if (retries > 0) { | |
2538 | IPW_WARNING("Parity error. Retrying init.\n"); | |
2539 | retries--; | |
2540 | goto retry; | |
2541 | } | |
2542 | ||
2543 | IPW_ERROR("TODO: Handle parity error -- schedule restart?\n"); | |
2544 | rc = -EIO; | |
2545 | goto error; | |
2546 | } | |
2547 | ||
2548 | /* wait for the device */ | |
2549 | rc = ipw_poll_bit(priv, CX2_INTA_RW, | |
2550 | CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500); | |
2551 | if (rc < 0) { | |
2552 | IPW_ERROR("device failed to start after 500ms\n"); | |
2553 | goto error; | |
2554 | } | |
2555 | IPW_DEBUG_INFO("device response after %dms\n", rc); | |
2556 | ||
2557 | /* ack fw init done interrupt */ | |
2558 | ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE); | |
2559 | ||
2560 | /* read eeprom data and initialize the eeprom region of sram */ | |
2561 | priv->eeprom_delay = 1; | |
2562 | ipw_eeprom_init_sram(priv); | |
2563 | ||
2564 | /* enable interrupts */ | |
2565 | ipw_enable_interrupts(priv); | |
2566 | ||
2567 | /* Ensure our queue has valid packets */ | |
2568 | ipw_rx_queue_replenish(priv); | |
2569 | ||
2570 | ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read); | |
2571 | ||
2572 | /* ack pending interrupts */ | |
2573 | ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL); | |
2574 | ||
2575 | #ifndef CONFIG_PM | |
2576 | release_firmware(bootfw); | |
2577 | release_firmware(ucode); | |
2578 | release_firmware(firmware); | |
2579 | #endif | |
2580 | return 0; | |
2581 | ||
2582 | error: | |
2583 | if (priv->rxq) { | |
2584 | ipw_rx_queue_free(priv, priv->rxq); | |
2585 | priv->rxq = NULL; | |
2586 | } | |
2587 | ipw_tx_queue_free(priv); | |
2588 | if (bootfw) | |
2589 | release_firmware(bootfw); | |
2590 | if (ucode) | |
2591 | release_firmware(ucode); | |
2592 | if (firmware) | |
2593 | release_firmware(firmware); | |
2594 | #ifdef CONFIG_PM | |
2595 | fw_loaded = 0; | |
2596 | bootfw = ucode = firmware = NULL; | |
2597 | #endif | |
2598 | ||
2599 | return rc; | |
2600 | } | |
2601 | ||
2602 | /** | |
2603 | * DMA services | |
2604 | * | |
2605 | * Theory of operation | |
2606 | * | |
2607 | * A queue is a circular buffers with 'Read' and 'Write' pointers. | |
2608 | * 2 empty entries always kept in the buffer to protect from overflow. | |
2609 | * | |
2610 | * For Tx queue, there are low mark and high mark limits. If, after queuing | |
2611 | * the packet for Tx, free space become < low mark, Tx queue stopped. When | |
2612 | * reclaiming packets (on 'tx done IRQ), if free space become > high mark, | |
2613 | * Tx queue resumed. | |
2614 | * | |
2615 | * The IPW operates with six queues, one receive queue in the device's | |
2616 | * sram, one transmit queue for sending commands to the device firmware, | |
2617 | * and four transmit queues for data. | |
2618 | * | |
2619 | * The four transmit queues allow for performing quality of service (qos) | |
2620 | * transmissions as per the 802.11 protocol. Currently Linux does not | |
2621 | * provide a mechanism to the user for utilizing prioritized queues, so | |
2622 | * we only utilize the first data transmit queue (queue1). | |
2623 | */ | |
2624 | ||
2625 | /** | |
2626 | * Driver allocates buffers of this size for Rx | |
2627 | */ | |
2628 | ||
2629 | static inline int ipw_queue_space(const struct clx2_queue *q) | |
2630 | { | |
2631 | int s = q->last_used - q->first_empty; | |
2632 | if (s <= 0) | |
2633 | s += q->n_bd; | |
2634 | s -= 2; /* keep some reserve to not confuse empty and full situations */ | |
2635 | if (s < 0) | |
2636 | s = 0; | |
2637 | return s; | |
2638 | } | |
2639 | ||
2640 | static inline int ipw_queue_inc_wrap(int index, int n_bd) | |
2641 | { | |
2642 | return (++index == n_bd) ? 0 : index; | |
2643 | } | |
2644 | ||
2645 | /** | |
2646 | * Initialize common DMA queue structure | |
2647 | * | |
2648 | * @param q queue to init | |
2649 | * @param count Number of BD's to allocate. Should be power of 2 | |
2650 | * @param read_register Address for 'read' register | |
2651 | * (not offset within BAR, full address) | |
2652 | * @param write_register Address for 'write' register | |
2653 | * (not offset within BAR, full address) | |
2654 | * @param base_register Address for 'base' register | |
2655 | * (not offset within BAR, full address) | |
2656 | * @param size Address for 'size' register | |
2657 | * (not offset within BAR, full address) | |
2658 | */ | |
2659 | static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, | |
2660 | int count, u32 read, u32 write, | |
2661 | u32 base, u32 size) | |
2662 | { | |
2663 | q->n_bd = count; | |
2664 | ||
2665 | q->low_mark = q->n_bd / 4; | |
2666 | if (q->low_mark < 4) | |
2667 | q->low_mark = 4; | |
2668 | ||
2669 | q->high_mark = q->n_bd / 8; | |
2670 | if (q->high_mark < 2) | |
2671 | q->high_mark = 2; | |
2672 | ||
2673 | q->first_empty = q->last_used = 0; | |
2674 | q->reg_r = read; | |
2675 | q->reg_w = write; | |
2676 | ||
2677 | ipw_write32(priv, base, q->dma_addr); | |
2678 | ipw_write32(priv, size, count); | |
2679 | ipw_write32(priv, read, 0); | |
2680 | ipw_write32(priv, write, 0); | |
2681 | ||
2682 | _ipw_read32(priv, 0x90); | |
2683 | } | |
2684 | ||
2685 | static int ipw_queue_tx_init(struct ipw_priv *priv, | |
2686 | struct clx2_tx_queue *q, | |
2687 | int count, u32 read, u32 write, | |
2688 | u32 base, u32 size) | |
2689 | { | |
2690 | struct pci_dev *dev = priv->pci_dev; | |
2691 | ||
2692 | q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL); | |
2693 | if (!q->txb) { | |
2694 | IPW_ERROR("vmalloc for auxilary BD structures failed\n"); | |
2695 | return -ENOMEM; | |
2696 | } | |
2697 | ||
2698 | q->bd = pci_alloc_consistent(dev,sizeof(q->bd[0])*count, &q->q.dma_addr); | |
2699 | if (!q->bd) { | |
2700 | IPW_ERROR("pci_alloc_consistent(%d) failed\n", | |
2701 | sizeof(q->bd[0]) * count); | |
2702 | kfree(q->txb); | |
2703 | q->txb = NULL; | |
2704 | return -ENOMEM; | |
2705 | } | |
2706 | ||
2707 | ipw_queue_init(priv, &q->q, count, read, write, base, size); | |
2708 | return 0; | |
2709 | } | |
2710 | ||
2711 | /** | |
2712 | * Free one TFD, those at index [txq->q.last_used]. | |
2713 | * Do NOT advance any indexes | |
2714 | * | |
2715 | * @param dev | |
2716 | * @param txq | |
2717 | */ | |
2718 | static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, | |
2719 | struct clx2_tx_queue *txq) | |
2720 | { | |
2721 | struct tfd_frame *bd = &txq->bd[txq->q.last_used]; | |
2722 | struct pci_dev *dev = priv->pci_dev; | |
2723 | int i; | |
2724 | ||
2725 | /* classify bd */ | |
2726 | if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE) | |
2727 | /* nothing to cleanup after for host commands */ | |
2728 | return; | |
2729 | ||
2730 | /* sanity check */ | |
2731 | if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) { | |
2732 | IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks); | |
2733 | /** @todo issue fatal error, it is quite serious situation */ | |
2734 | return; | |
2735 | } | |
2736 | ||
2737 | /* unmap chunks if any */ | |
2738 | for (i = 0; i < bd->u.data.num_chunks; i++) { | |
2739 | pci_unmap_single(dev, bd->u.data.chunk_ptr[i], | |
2740 | bd->u.data.chunk_len[i], PCI_DMA_TODEVICE); | |
2741 | if (txq->txb[txq->q.last_used]) { | |
2742 | ieee80211_txb_free(txq->txb[txq->q.last_used]); | |
2743 | txq->txb[txq->q.last_used] = NULL; | |
2744 | } | |
2745 | } | |
2746 | } | |
2747 | ||
2748 | /** | |
2749 | * Deallocate DMA queue. | |
2750 | * | |
2751 | * Empty queue by removing and destroying all BD's. | |
2752 | * Free all buffers. | |
2753 | * | |
2754 | * @param dev | |
2755 | * @param q | |
2756 | */ | |
2757 | static void ipw_queue_tx_free(struct ipw_priv *priv, | |
2758 | struct clx2_tx_queue *txq) | |
2759 | { | |
2760 | struct clx2_queue *q = &txq->q; | |
2761 | struct pci_dev *dev = priv->pci_dev; | |
2762 | ||
2763 | if (q->n_bd == 0) | |
2764 | return; | |
2765 | ||
2766 | /* first, empty all BD's */ | |
2767 | for (; q->first_empty != q->last_used; | |
2768 | q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { | |
2769 | ipw_queue_tx_free_tfd(priv, txq); | |
2770 | } | |
2771 | ||
2772 | /* free buffers belonging to queue itself */ | |
2773 | pci_free_consistent(dev, sizeof(txq->bd[0])*q->n_bd, txq->bd, | |
2774 | q->dma_addr); | |
2775 | kfree(txq->txb); | |
2776 | ||
2777 | /* 0 fill whole structure */ | |
2778 | memset(txq, 0, sizeof(*txq)); | |
2779 | } | |
2780 | ||
2781 | ||
2782 | /** | |
2783 | * Destroy all DMA queues and structures | |
2784 | * | |
2785 | * @param priv | |
2786 | */ | |
2787 | static void ipw_tx_queue_free(struct ipw_priv *priv) | |
2788 | { | |
2789 | /* Tx CMD queue */ | |
2790 | ipw_queue_tx_free(priv, &priv->txq_cmd); | |
2791 | ||
2792 | /* Tx queues */ | |
2793 | ipw_queue_tx_free(priv, &priv->txq[0]); | |
2794 | ipw_queue_tx_free(priv, &priv->txq[1]); | |
2795 | ipw_queue_tx_free(priv, &priv->txq[2]); | |
2796 | ipw_queue_tx_free(priv, &priv->txq[3]); | |
2797 | } | |
2798 | ||
2799 | static void inline __maybe_wake_tx(struct ipw_priv *priv) | |
2800 | { | |
2801 | if (netif_running(priv->net_dev)) { | |
2802 | switch (priv->port_type) { | |
2803 | case DCR_TYPE_MU_BSS: | |
2804 | case DCR_TYPE_MU_IBSS: | |
2805 | if (!(priv->status & STATUS_ASSOCIATED)) { | |
2806 | return; | |
2807 | } | |
2808 | } | |
2809 | netif_wake_queue(priv->net_dev); | |
2810 | } | |
2811 | ||
2812 | } | |
2813 | ||
2814 | static inline void ipw_create_bssid(struct ipw_priv *priv, u8 *bssid) | |
2815 | { | |
2816 | /* First 3 bytes are manufacturer */ | |
2817 | bssid[0] = priv->mac_addr[0]; | |
2818 | bssid[1] = priv->mac_addr[1]; | |
2819 | bssid[2] = priv->mac_addr[2]; | |
2820 | ||
2821 | /* Last bytes are random */ | |
2822 | get_random_bytes(&bssid[3], ETH_ALEN-3); | |
2823 | ||
2824 | bssid[0] &= 0xfe; /* clear multicast bit */ | |
2825 | bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ | |
2826 | } | |
2827 | ||
2828 | static inline u8 ipw_add_station(struct ipw_priv *priv, u8 *bssid) | |
2829 | { | |
2830 | struct ipw_station_entry entry; | |
2831 | int i; | |
2832 | ||
2833 | for (i = 0; i < priv->num_stations; i++) { | |
2834 | if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { | |
2835 | /* Another node is active in network */ | |
2836 | priv->missed_adhoc_beacons = 0; | |
2837 | if (!(priv->config & CFG_STATIC_CHANNEL)) | |
2838 | /* when other nodes drop out, we drop out */ | |
2839 | priv->config &= ~CFG_ADHOC_PERSIST; | |
2840 | ||
2841 | return i; | |
2842 | } | |
2843 | } | |
2844 | ||
2845 | if (i == MAX_STATIONS) | |
2846 | return IPW_INVALID_STATION; | |
2847 | ||
2848 | IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid)); | |
2849 | ||
2850 | entry.reserved = 0; | |
2851 | entry.support_mode = 0; | |
2852 | memcpy(entry.mac_addr, bssid, ETH_ALEN); | |
2853 | memcpy(priv->stations[i], bssid, ETH_ALEN); | |
2854 | ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), | |
2855 | &entry, | |
2856 | sizeof(entry)); | |
2857 | priv->num_stations++; | |
2858 | ||
2859 | return i; | |
2860 | } | |
2861 | ||
2862 | static inline u8 ipw_find_station(struct ipw_priv *priv, u8 *bssid) | |
2863 | { | |
2864 | int i; | |
2865 | ||
2866 | for (i = 0; i < priv->num_stations; i++) | |
2867 | if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) | |
2868 | return i; | |
2869 | ||
2870 | return IPW_INVALID_STATION; | |
2871 | } | |
2872 | ||
2873 | static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) | |
2874 | { | |
2875 | int err; | |
2876 | ||
2877 | if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) { | |
2878 | IPW_DEBUG_ASSOC("Disassociating while not associated.\n"); | |
2879 | return; | |
2880 | } | |
2881 | ||
2882 | IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " " | |
2883 | "on channel %d.\n", | |
2884 | MAC_ARG(priv->assoc_request.bssid), | |
2885 | priv->assoc_request.channel); | |
2886 | ||
2887 | priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); | |
2888 | priv->status |= STATUS_DISASSOCIATING; | |
2889 | ||
2890 | if (quiet) | |
2891 | priv->assoc_request.assoc_type = HC_DISASSOC_QUIET; | |
2892 | else | |
2893 | priv->assoc_request.assoc_type = HC_DISASSOCIATE; | |
2894 | err = ipw_send_associate(priv, &priv->assoc_request); | |
2895 | if (err) { | |
2896 | IPW_DEBUG_HC("Attempt to send [dis]associate command " | |
2897 | "failed.\n"); | |
2898 | return; | |
2899 | } | |
2900 | ||
2901 | } | |
2902 | ||
2903 | static void ipw_disassociate(void *data) | |
2904 | { | |
2905 | ipw_send_disassociate(data, 0); | |
2906 | } | |
2907 | ||
2908 | static void notify_wx_assoc_event(struct ipw_priv *priv) | |
2909 | { | |
2910 | union iwreq_data wrqu; | |
2911 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | |
2912 | if (priv->status & STATUS_ASSOCIATED) | |
2913 | memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); | |
2914 | else | |
2915 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | |
2916 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | |
2917 | } | |
2918 | ||
2919 | struct ipw_status_code { | |
2920 | u16 status; | |
2921 | const char *reason; | |
2922 | }; | |
2923 | ||
2924 | static const struct ipw_status_code ipw_status_codes[] = { | |
2925 | {0x00, "Successful"}, | |
2926 | {0x01, "Unspecified failure"}, | |
2927 | {0x0A, "Cannot support all requested capabilities in the " | |
2928 | "Capability information field"}, | |
2929 | {0x0B, "Reassociation denied due to inability to confirm that " | |
2930 | "association exists"}, | |
2931 | {0x0C, "Association denied due to reason outside the scope of this " | |
2932 | "standard"}, | |
2933 | {0x0D, "Responding station does not support the specified authentication " | |
2934 | "algorithm"}, | |
2935 | {0x0E, "Received an Authentication frame with authentication sequence " | |
2936 | "transaction sequence number out of expected sequence"}, | |
2937 | {0x0F, "Authentication rejected because of challenge failure"}, | |
2938 | {0x10, "Authentication rejected due to timeout waiting for next " | |
2939 | "frame in sequence"}, | |
2940 | {0x11, "Association denied because AP is unable to handle additional " | |
2941 | "associated stations"}, | |
2942 | {0x12, "Association denied due to requesting station not supporting all " | |
2943 | "of the datarates in the BSSBasicServiceSet Parameter"}, | |
2944 | {0x13, "Association denied due to requesting station not supporting " | |
2945 | "short preamble operation"}, | |
2946 | {0x14, "Association denied due to requesting station not supporting " | |
2947 | "PBCC encoding"}, | |
2948 | {0x15, "Association denied due to requesting station not supporting " | |
2949 | "channel agility"}, | |
2950 | {0x19, "Association denied due to requesting station not supporting " | |
2951 | "short slot operation"}, | |
2952 | {0x1A, "Association denied due to requesting station not supporting " | |
2953 | "DSSS-OFDM operation"}, | |
2954 | {0x28, "Invalid Information Element"}, | |
2955 | {0x29, "Group Cipher is not valid"}, | |
2956 | {0x2A, "Pairwise Cipher is not valid"}, | |
2957 | {0x2B, "AKMP is not valid"}, | |
2958 | {0x2C, "Unsupported RSN IE version"}, | |
2959 | {0x2D, "Invalid RSN IE Capabilities"}, | |
2960 | {0x2E, "Cipher suite is rejected per security policy"}, | |
2961 | }; | |
2962 | ||
2963 | #ifdef CONFIG_IPW_DEBUG | |
2964 | static const char *ipw_get_status_code(u16 status) | |
2965 | { | |
2966 | int i; | |
2967 | for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) | |
2968 | if (ipw_status_codes[i].status == status) | |
2969 | return ipw_status_codes[i].reason; | |
2970 | return "Unknown status value."; | |
2971 | } | |
2972 | #endif | |
2973 | ||
2974 | static void inline average_init(struct average *avg) | |
2975 | { | |
2976 | memset(avg, 0, sizeof(*avg)); | |
2977 | } | |
2978 | ||
2979 | static void inline average_add(struct average *avg, s16 val) | |
2980 | { | |
2981 | avg->sum -= avg->entries[avg->pos]; | |
2982 | avg->sum += val; | |
2983 | avg->entries[avg->pos++] = val; | |
2984 | if (unlikely(avg->pos == AVG_ENTRIES)) { | |
2985 | avg->init = 1; | |
2986 | avg->pos = 0; | |
2987 | } | |
2988 | } | |
2989 | ||
2990 | static s16 inline average_value(struct average *avg) | |
2991 | { | |
2992 | if (!unlikely(avg->init)) { | |
2993 | if (avg->pos) | |
2994 | return avg->sum / avg->pos; | |
2995 | return 0; | |
2996 | } | |
2997 | ||
2998 | return avg->sum / AVG_ENTRIES; | |
2999 | } | |
3000 | ||
3001 | static void ipw_reset_stats(struct ipw_priv *priv) | |
3002 | { | |
3003 | u32 len = sizeof(u32); | |
3004 | ||
3005 | priv->quality = 0; | |
3006 | ||
3007 | average_init(&priv->average_missed_beacons); | |
3008 | average_init(&priv->average_rssi); | |
3009 | average_init(&priv->average_noise); | |
3010 | ||
3011 | priv->last_rate = 0; | |
3012 | priv->last_missed_beacons = 0; | |
3013 | priv->last_rx_packets = 0; | |
3014 | priv->last_tx_packets = 0; | |
3015 | priv->last_tx_failures = 0; | |
3016 | ||
3017 | /* Firmware managed, reset only when NIC is restarted, so we have to | |
3018 | * normalize on the current value */ | |
3019 | ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, | |
3020 | &priv->last_rx_err, &len); | |
3021 | ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, | |
3022 | &priv->last_tx_failures, &len); | |
3023 | ||
3024 | /* Driver managed, reset with each association */ | |
3025 | priv->missed_adhoc_beacons = 0; | |
3026 | priv->missed_beacons = 0; | |
3027 | priv->tx_packets = 0; | |
3028 | priv->rx_packets = 0; | |
3029 | ||
3030 | } | |
3031 | ||
3032 | ||
3033 | static inline u32 ipw_get_max_rate(struct ipw_priv *priv) | |
3034 | { | |
3035 | u32 i = 0x80000000; | |
3036 | u32 mask = priv->rates_mask; | |
3037 | /* If currently associated in B mode, restrict the maximum | |
3038 | * rate match to B rates */ | |
3039 | if (priv->assoc_request.ieee_mode == IPW_B_MODE) | |
3040 | mask &= IEEE80211_CCK_RATES_MASK; | |
3041 | ||
3042 | /* TODO: Verify that the rate is supported by the current rates | |
3043 | * list. */ | |
3044 | ||
3045 | while (i && !(mask & i)) i >>= 1; | |
3046 | switch (i) { | |
3047 | case IEEE80211_CCK_RATE_1MB_MASK: return 1000000; | |
3048 | case IEEE80211_CCK_RATE_2MB_MASK: return 2000000; | |
3049 | case IEEE80211_CCK_RATE_5MB_MASK: return 5500000; | |
3050 | case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000; | |
3051 | case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000; | |
3052 | case IEEE80211_CCK_RATE_11MB_MASK: return 11000000; | |
3053 | case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000; | |
3054 | case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000; | |
3055 | case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000; | |
3056 | case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000; | |
3057 | case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000; | |
3058 | case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000; | |
3059 | } | |
3060 | ||
3061 | if (priv->ieee->mode == IEEE_B) | |
3062 | return 11000000; | |
3063 | else | |
3064 | return 54000000; | |
3065 | } | |
3066 | ||
3067 | static u32 ipw_get_current_rate(struct ipw_priv *priv) | |
3068 | { | |
3069 | u32 rate, len = sizeof(rate); | |
3070 | int err; | |
3071 | ||
3072 | if (!(priv->status & STATUS_ASSOCIATED)) | |
3073 | return 0; | |
3074 | ||
3075 | if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) { | |
3076 | err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate, | |
3077 | &len); | |
3078 | if (err) { | |
3079 | IPW_DEBUG_INFO("failed querying ordinals.\n"); | |
3080 | return 0; | |
3081 | } | |
3082 | } else | |
3083 | return ipw_get_max_rate(priv); | |
3084 | ||
3085 | switch (rate) { | |
3086 | case IPW_TX_RATE_1MB: return 1000000; | |
3087 | case IPW_TX_RATE_2MB: return 2000000; | |
3088 | case IPW_TX_RATE_5MB: return 5500000; | |
3089 | case IPW_TX_RATE_6MB: return 6000000; | |
3090 | case IPW_TX_RATE_9MB: return 9000000; | |
3091 | case IPW_TX_RATE_11MB: return 11000000; | |
3092 | case IPW_TX_RATE_12MB: return 12000000; | |
3093 | case IPW_TX_RATE_18MB: return 18000000; | |
3094 | case IPW_TX_RATE_24MB: return 24000000; | |
3095 | case IPW_TX_RATE_36MB: return 36000000; | |
3096 | case IPW_TX_RATE_48MB: return 48000000; | |
3097 | case IPW_TX_RATE_54MB: return 54000000; | |
3098 | } | |
3099 | ||
3100 | return 0; | |
3101 | } | |
3102 | ||
3103 | #define PERFECT_RSSI (-50) | |
3104 | #define WORST_RSSI (-85) | |
3105 | #define IPW_STATS_INTERVAL (2 * HZ) | |
3106 | static void ipw_gather_stats(struct ipw_priv *priv) | |
3107 | { | |
3108 | u32 rx_err, rx_err_delta, rx_packets_delta; | |
3109 | u32 tx_failures, tx_failures_delta, tx_packets_delta; | |
3110 | u32 missed_beacons_percent, missed_beacons_delta; | |
3111 | u32 quality = 0; | |
3112 | u32 len = sizeof(u32); | |
3113 | s16 rssi; | |
3114 | u32 beacon_quality, signal_quality, tx_quality, rx_quality, | |
3115 | rate_quality; | |
3116 | ||
3117 | if (!(priv->status & STATUS_ASSOCIATED)) { | |
3118 | priv->quality = 0; | |
3119 | return; | |
3120 | } | |
3121 | ||
3122 | /* Update the statistics */ | |
3123 | ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, | |
3124 | &priv->missed_beacons, &len); | |
3125 | missed_beacons_delta = priv->missed_beacons - | |
3126 | priv->last_missed_beacons; | |
3127 | priv->last_missed_beacons = priv->missed_beacons; | |
3128 | if (priv->assoc_request.beacon_interval) { | |
3129 | missed_beacons_percent = missed_beacons_delta * | |
3130 | (HZ * priv->assoc_request.beacon_interval) / | |
3131 | (IPW_STATS_INTERVAL * 10); | |
3132 | } else { | |
3133 | missed_beacons_percent = 0; | |
3134 | } | |
3135 | average_add(&priv->average_missed_beacons, missed_beacons_percent); | |
3136 | ||
3137 | ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len); | |
3138 | rx_err_delta = rx_err - priv->last_rx_err; | |
3139 | priv->last_rx_err = rx_err; | |
3140 | ||
3141 | ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len); | |
3142 | tx_failures_delta = tx_failures - priv->last_tx_failures; | |
3143 | priv->last_tx_failures = tx_failures; | |
3144 | ||
3145 | rx_packets_delta = priv->rx_packets - priv->last_rx_packets; | |
3146 | priv->last_rx_packets = priv->rx_packets; | |
3147 | ||
3148 | tx_packets_delta = priv->tx_packets - priv->last_tx_packets; | |
3149 | priv->last_tx_packets = priv->tx_packets; | |
3150 | ||
3151 | /* Calculate quality based on the following: | |
3152 | * | |
3153 | * Missed beacon: 100% = 0, 0% = 70% missed | |
3154 | * Rate: 60% = 1Mbs, 100% = Max | |
3155 | * Rx and Tx errors represent a straight % of total Rx/Tx | |
3156 | * RSSI: 100% = > -50, 0% = < -80 | |
3157 | * Rx errors: 100% = 0, 0% = 50% missed | |
3158 | * | |
3159 | * The lowest computed quality is used. | |
3160 | * | |
3161 | */ | |
3162 | #define BEACON_THRESHOLD 5 | |
3163 | beacon_quality = 100 - missed_beacons_percent; | |
3164 | if (beacon_quality < BEACON_THRESHOLD) | |
3165 | beacon_quality = 0; | |
3166 | else | |
3167 | beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / | |
3168 | (100 - BEACON_THRESHOLD); | |
3169 | IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", | |
3170 | beacon_quality, missed_beacons_percent); | |
3171 | ||
3172 | priv->last_rate = ipw_get_current_rate(priv); | |
3173 | rate_quality = priv->last_rate * 40 / priv->last_rate + 60; | |
3174 | IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", | |
3175 | rate_quality, priv->last_rate / 1000000); | |
3176 | ||
3177 | if (rx_packets_delta > 100 && | |
3178 | rx_packets_delta + rx_err_delta) | |
3179 | rx_quality = 100 - (rx_err_delta * 100) / | |
3180 | (rx_packets_delta + rx_err_delta); | |
3181 | else | |
3182 | rx_quality = 100; | |
3183 | IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", | |
3184 | rx_quality, rx_err_delta, rx_packets_delta); | |
3185 | ||
3186 | if (tx_packets_delta > 100 && | |
3187 | tx_packets_delta + tx_failures_delta) | |
3188 | tx_quality = 100 - (tx_failures_delta * 100) / | |
3189 | (tx_packets_delta + tx_failures_delta); | |
3190 | else | |
3191 | tx_quality = 100; | |
3192 | IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", | |
3193 | tx_quality, tx_failures_delta, tx_packets_delta); | |
3194 | ||
3195 | rssi = average_value(&priv->average_rssi); | |
3196 | if (rssi > PERFECT_RSSI) | |
3197 | signal_quality = 100; | |
3198 | else if (rssi < WORST_RSSI) | |
3199 | signal_quality = 0; | |
3200 | else | |
3201 | signal_quality = (rssi - WORST_RSSI) * 100 / | |
3202 | (PERFECT_RSSI - WORST_RSSI); | |
3203 | IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", | |
3204 | signal_quality, rssi); | |
3205 | ||
3206 | quality = min(beacon_quality, | |
3207 | min(rate_quality, | |
3208 | min(tx_quality, min(rx_quality, signal_quality)))); | |
3209 | if (quality == beacon_quality) | |
3210 | IPW_DEBUG_STATS( | |
3211 | "Quality (%d%%): Clamped to missed beacons.\n", | |
3212 | quality); | |
3213 | if (quality == rate_quality) | |
3214 | IPW_DEBUG_STATS( | |
3215 | "Quality (%d%%): Clamped to rate quality.\n", | |
3216 | quality); | |
3217 | if (quality == tx_quality) | |
3218 | IPW_DEBUG_STATS( | |
3219 | "Quality (%d%%): Clamped to Tx quality.\n", | |
3220 | quality); | |
3221 | if (quality == rx_quality) | |
3222 | IPW_DEBUG_STATS( | |
3223 | "Quality (%d%%): Clamped to Rx quality.\n", | |
3224 | quality); | |
3225 | if (quality == signal_quality) | |
3226 | IPW_DEBUG_STATS( | |
3227 | "Quality (%d%%): Clamped to signal quality.\n", | |
3228 | quality); | |
3229 | ||
3230 | priv->quality = quality; | |
3231 | ||
3232 | queue_delayed_work(priv->workqueue, &priv->gather_stats, | |
3233 | IPW_STATS_INTERVAL); | |
3234 | } | |
3235 | ||
3236 | /** | |
3237 | * Handle host notification packet. | |
3238 | * Called from interrupt routine | |
3239 | */ | |
3240 | static inline void ipw_rx_notification(struct ipw_priv* priv, | |
3241 | struct ipw_rx_notification *notif) | |
3242 | { | |
3243 | IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", | |
3244 | notif->subtype, notif->size); | |
3245 | ||
3246 | switch (notif->subtype) { | |
3247 | case HOST_NOTIFICATION_STATUS_ASSOCIATED: { | |
3248 | struct notif_association *assoc = ¬if->u.assoc; | |
3249 | ||
3250 | switch (assoc->state) { | |
3251 | case CMAS_ASSOCIATED: { | |
3252 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3253 | "associated: '%s' " MAC_FMT " \n", | |
3254 | escape_essid(priv->essid, priv->essid_len), | |
3255 | MAC_ARG(priv->bssid)); | |
3256 | ||
3257 | switch (priv->ieee->iw_mode) { | |
3258 | case IW_MODE_INFRA: | |
3259 | memcpy(priv->ieee->bssid, priv->bssid, | |
3260 | ETH_ALEN); | |
3261 | break; | |
3262 | ||
3263 | case IW_MODE_ADHOC: | |
3264 | memcpy(priv->ieee->bssid, priv->bssid, | |
3265 | ETH_ALEN); | |
3266 | ||
3267 | /* clear out the station table */ | |
3268 | priv->num_stations = 0; | |
3269 | ||
3270 | IPW_DEBUG_ASSOC("queueing adhoc check\n"); | |
3271 | queue_delayed_work(priv->workqueue, | |
3272 | &priv->adhoc_check, | |
3273 | priv->assoc_request.beacon_interval); | |
3274 | break; | |
3275 | } | |
3276 | ||
3277 | priv->status &= ~STATUS_ASSOCIATING; | |
3278 | priv->status |= STATUS_ASSOCIATED; | |
3279 | ||
3280 | netif_carrier_on(priv->net_dev); | |
3281 | if (netif_queue_stopped(priv->net_dev)) { | |
3282 | IPW_DEBUG_NOTIF("waking queue\n"); | |
3283 | netif_wake_queue(priv->net_dev); | |
3284 | } else { | |
3285 | IPW_DEBUG_NOTIF("starting queue\n"); | |
3286 | netif_start_queue(priv->net_dev); | |
3287 | } | |
3288 | ||
3289 | ipw_reset_stats(priv); | |
3290 | /* Ensure the rate is updated immediately */ | |
3291 | priv->last_rate = ipw_get_current_rate(priv); | |
3292 | schedule_work(&priv->gather_stats); | |
3293 | notify_wx_assoc_event(priv); | |
3294 | ||
3295 | /* queue_delayed_work(priv->workqueue, | |
3296 | &priv->request_scan, | |
3297 | SCAN_ASSOCIATED_INTERVAL); | |
3298 | */ | |
3299 | break; | |
3300 | } | |
3301 | ||
3302 | case CMAS_AUTHENTICATED: { | |
3303 | if (priv->status & (STATUS_ASSOCIATED | STATUS_AUTH)) { | |
3304 | #ifdef CONFIG_IPW_DEBUG | |
3305 | struct notif_authenticate *auth = ¬if->u.auth; | |
3306 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3307 | "deauthenticated: '%s' " MAC_FMT ": (0x%04X) - %s \n", | |
3308 | escape_essid(priv->essid, priv->essid_len), | |
3309 | MAC_ARG(priv->bssid), | |
3310 | ntohs(auth->status), | |
3311 | ipw_get_status_code(ntohs(auth->status))); | |
3312 | #endif | |
3313 | ||
3314 | priv->status &= ~(STATUS_ASSOCIATING | | |
3315 | STATUS_AUTH | | |
3316 | STATUS_ASSOCIATED); | |
3317 | ||
3318 | netif_carrier_off(priv->net_dev); | |
3319 | netif_stop_queue(priv->net_dev); | |
3320 | queue_work(priv->workqueue, &priv->request_scan); | |
3321 | notify_wx_assoc_event(priv); | |
3322 | break; | |
3323 | } | |
3324 | ||
3325 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3326 | "authenticated: '%s' " MAC_FMT "\n", | |
3327 | escape_essid(priv->essid, priv->essid_len), | |
3328 | MAC_ARG(priv->bssid)); | |
3329 | break; | |
3330 | } | |
3331 | ||
3332 | case CMAS_INIT: { | |
3333 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3334 | "disassociated: '%s' " MAC_FMT " \n", | |
3335 | escape_essid(priv->essid, priv->essid_len), | |
3336 | MAC_ARG(priv->bssid)); | |
3337 | ||
3338 | priv->status &= ~( | |
3339 | STATUS_DISASSOCIATING | | |
3340 | STATUS_ASSOCIATING | | |
3341 | STATUS_ASSOCIATED | | |
3342 | STATUS_AUTH); | |
3343 | ||
3344 | netif_stop_queue(priv->net_dev); | |
3345 | if (!(priv->status & STATUS_ROAMING)) { | |
3346 | netif_carrier_off(priv->net_dev); | |
3347 | notify_wx_assoc_event(priv); | |
3348 | ||
3349 | /* Cancel any queued work ... */ | |
3350 | cancel_delayed_work(&priv->request_scan); | |
3351 | cancel_delayed_work(&priv->adhoc_check); | |
3352 | ||
3353 | /* Queue up another scan... */ | |
3354 | queue_work(priv->workqueue, | |
3355 | &priv->request_scan); | |
3356 | ||
3357 | cancel_delayed_work(&priv->gather_stats); | |
3358 | } else { | |
3359 | priv->status |= STATUS_ROAMING; | |
3360 | queue_work(priv->workqueue, | |
3361 | &priv->request_scan); | |
3362 | } | |
3363 | ||
3364 | ipw_reset_stats(priv); | |
3365 | break; | |
3366 | } | |
3367 | ||
3368 | default: | |
3369 | IPW_ERROR("assoc: unknown (%d)\n", | |
3370 | assoc->state); | |
3371 | break; | |
3372 | } | |
3373 | ||
3374 | break; | |
3375 | } | |
3376 | ||
3377 | case HOST_NOTIFICATION_STATUS_AUTHENTICATE: { | |
3378 | struct notif_authenticate *auth = ¬if->u.auth; | |
3379 | switch (auth->state) { | |
3380 | case CMAS_AUTHENTICATED: | |
3381 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, | |
3382 | "authenticated: '%s' " MAC_FMT " \n", | |
3383 | escape_essid(priv->essid, priv->essid_len), | |
3384 | MAC_ARG(priv->bssid)); | |
3385 | priv->status |= STATUS_AUTH; | |
3386 | break; | |
3387 | ||
3388 | case CMAS_INIT: | |
3389 | if (priv->status & STATUS_AUTH) { | |
3390 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3391 | "authentication failed (0x%04X): %s\n", | |
3392 | ntohs(auth->status), | |
3393 | ipw_get_status_code(ntohs(auth->status))); | |
3394 | } | |
3395 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3396 | "deauthenticated: '%s' " MAC_FMT "\n", | |
3397 | escape_essid(priv->essid, priv->essid_len), | |
3398 | MAC_ARG(priv->bssid)); | |
3399 | ||
3400 | priv->status &= ~(STATUS_ASSOCIATING | | |
3401 | STATUS_AUTH | | |
3402 | STATUS_ASSOCIATED); | |
3403 | ||
3404 | netif_carrier_off(priv->net_dev); | |
3405 | netif_stop_queue(priv->net_dev); | |
3406 | queue_work(priv->workqueue, &priv->request_scan); | |
3407 | notify_wx_assoc_event(priv); | |
3408 | break; | |
3409 | ||
3410 | case CMAS_TX_AUTH_SEQ_1: | |
3411 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3412 | "AUTH_SEQ_1\n"); | |
3413 | break; | |
3414 | case CMAS_RX_AUTH_SEQ_2: | |
3415 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3416 | "AUTH_SEQ_2\n"); | |
3417 | break; | |
3418 | case CMAS_AUTH_SEQ_1_PASS: | |
3419 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3420 | "AUTH_SEQ_1_PASS\n"); | |
3421 | break; | |
3422 | case CMAS_AUTH_SEQ_1_FAIL: | |
3423 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3424 | "AUTH_SEQ_1_FAIL\n"); | |
3425 | break; | |
3426 | case CMAS_TX_AUTH_SEQ_3: | |
3427 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3428 | "AUTH_SEQ_3\n"); | |
3429 | break; | |
3430 | case CMAS_RX_AUTH_SEQ_4: | |
3431 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3432 | "RX_AUTH_SEQ_4\n"); | |
3433 | break; | |
3434 | case CMAS_AUTH_SEQ_2_PASS: | |
3435 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3436 | "AUTH_SEQ_2_PASS\n"); | |
3437 | break; | |
3438 | case CMAS_AUTH_SEQ_2_FAIL: | |
3439 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3440 | "AUT_SEQ_2_FAIL\n"); | |
3441 | break; | |
3442 | case CMAS_TX_ASSOC: | |
3443 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3444 | "TX_ASSOC\n"); | |
3445 | break; | |
3446 | case CMAS_RX_ASSOC_RESP: | |
3447 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3448 | "RX_ASSOC_RESP\n"); | |
3449 | break; | |
3450 | case CMAS_ASSOCIATED: | |
3451 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, | |
3452 | "ASSOCIATED\n"); | |
3453 | break; | |
3454 | default: | |
3455 | IPW_DEBUG_NOTIF("auth: failure - %d\n", auth->state); | |
3456 | break; | |
3457 | } | |
3458 | break; | |
3459 | } | |
3460 | ||
3461 | case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT: { | |
3462 | struct notif_channel_result *x = ¬if->u.channel_result; | |
3463 | ||
3464 | if (notif->size == sizeof(*x)) { | |
3465 | IPW_DEBUG_SCAN("Scan result for channel %d\n", | |
3466 | x->channel_num); | |
3467 | } else { | |
3468 | IPW_DEBUG_SCAN("Scan result of wrong size %d " | |
3469 | "(should be %d)\n", | |
3470 | notif->size,sizeof(*x)); | |
3471 | } | |
3472 | break; | |
3473 | } | |
3474 | ||
3475 | case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED: { | |
3476 | struct notif_scan_complete* x = ¬if->u.scan_complete; | |
3477 | if (notif->size == sizeof(*x)) { | |
3478 | IPW_DEBUG_SCAN("Scan completed: type %d, %d channels, " | |
3479 | "%d status\n", | |
3480 | x->scan_type, | |
3481 | x->num_channels, | |
3482 | x->status); | |
3483 | } else { | |
3484 | IPW_ERROR("Scan completed of wrong size %d " | |
3485 | "(should be %d)\n", | |
3486 | notif->size,sizeof(*x)); | |
3487 | } | |
3488 | ||
3489 | priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); | |
3490 | ||
3491 | cancel_delayed_work(&priv->scan_check); | |
3492 | ||
3493 | if (!(priv->status & (STATUS_ASSOCIATED | | |
3494 | STATUS_ASSOCIATING | | |
3495 | STATUS_ROAMING | | |
3496 | STATUS_DISASSOCIATING))) | |
3497 | queue_work(priv->workqueue, &priv->associate); | |
3498 | else if (priv->status & STATUS_ROAMING) { | |
3499 | /* If a scan completed and we are in roam mode, then | |
3500 | * the scan that completed was the one requested as a | |
3501 | * result of entering roam... so, schedule the | |
3502 | * roam work */ | |
3503 | queue_work(priv->workqueue, &priv->roam); | |
3504 | } else if (priv->status & STATUS_SCAN_PENDING) | |
3505 | queue_work(priv->workqueue, &priv->request_scan); | |
3506 | ||
3507 | priv->ieee->scans++; | |
3508 | break; | |
3509 | } | |
3510 | ||
3511 | case HOST_NOTIFICATION_STATUS_FRAG_LENGTH: { | |
3512 | struct notif_frag_length *x = ¬if->u.frag_len; | |
3513 | ||
3514 | if (notif->size == sizeof(*x)) { | |
3515 | IPW_ERROR("Frag length: %d\n", x->frag_length); | |
3516 | } else { | |
3517 | IPW_ERROR("Frag length of wrong size %d " | |
3518 | "(should be %d)\n", | |
3519 | notif->size, sizeof(*x)); | |
3520 | } | |
3521 | break; | |
3522 | } | |
3523 | ||
3524 | case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION: { | |
3525 | struct notif_link_deterioration *x = | |
3526 | ¬if->u.link_deterioration; | |
3527 | if (notif->size==sizeof(*x)) { | |
3528 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, | |
3529 | "link deterioration: '%s' " MAC_FMT " \n", | |
3530 | escape_essid(priv->essid, priv->essid_len), | |
3531 | MAC_ARG(priv->bssid)); | |
3532 | memcpy(&priv->last_link_deterioration, x, sizeof(*x)); | |
3533 | } else { | |
3534 | IPW_ERROR("Link Deterioration of wrong size %d " | |
3535 | "(should be %d)\n", | |
3536 | notif->size,sizeof(*x)); | |
3537 | } | |
3538 | break; | |
3539 | } | |
3540 | ||
3541 | case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE: { | |
3542 | IPW_ERROR("Dino config\n"); | |
3543 | if (priv->hcmd && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) { | |
3544 | /* TODO: Do anything special? */ | |
3545 | } else { | |
3546 | IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); | |
3547 | } | |
3548 | break; | |
3549 | } | |
3550 | ||
3551 | case HOST_NOTIFICATION_STATUS_BEACON_STATE: { | |
3552 | struct notif_beacon_state *x = ¬if->u.beacon_state; | |
3553 | if (notif->size != sizeof(*x)) { | |
3554 | IPW_ERROR("Beacon state of wrong size %d (should " | |
3555 | "be %d)\n", notif->size, sizeof(*x)); | |
3556 | break; | |
3557 | } | |
3558 | ||
3559 | if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) { | |
3560 | if (priv->status & STATUS_SCANNING) { | |
3561 | /* Stop scan to keep fw from getting | |
3562 | * stuck... */ | |
3563 | queue_work(priv->workqueue, | |
3564 | &priv->abort_scan); | |
3565 | } | |
3566 | ||
3567 | if (x->number > priv->missed_beacon_threshold && | |
3568 | priv->status & STATUS_ASSOCIATED) { | |
3569 | IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | | |
3570 | IPW_DL_STATE, | |
3571 | "Missed beacon: %d - disassociate\n", | |
3572 | x->number); | |
3573 | queue_work(priv->workqueue, | |
3574 | &priv->disassociate); | |
3575 | } else if (x->number > priv->roaming_threshold) { | |
3576 | IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, | |
3577 | "Missed beacon: %d - initiate " | |
3578 | "roaming\n", | |
3579 | x->number); | |
3580 | queue_work(priv->workqueue, | |
3581 | &priv->roam); | |
3582 | } else { | |
3583 | IPW_DEBUG_NOTIF("Missed beacon: %d\n", | |
3584 | x->number); | |
3585 | } | |
3586 | ||
3587 | priv->notif_missed_beacons = x->number; | |
3588 | ||
3589 | } | |
3590 | ||
3591 | ||
3592 | break; | |
3593 | } | |
3594 | ||
3595 | case HOST_NOTIFICATION_STATUS_TGI_TX_KEY: { | |
3596 | struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key; | |
3597 | if (notif->size==sizeof(*x)) { | |
3598 | IPW_ERROR("TGi Tx Key: state 0x%02x sec type " | |
3599 | "0x%02x station %d\n", | |
3600 | x->key_state,x->security_type, | |
3601 | x->station_index); | |
3602 | break; | |
3603 | } | |
3604 | ||
3605 | IPW_ERROR("TGi Tx Key of wrong size %d (should be %d)\n", | |
3606 | notif->size,sizeof(*x)); | |
3607 | break; | |
3608 | } | |
3609 | ||
3610 | case HOST_NOTIFICATION_CALIB_KEEP_RESULTS: { | |
3611 | struct notif_calibration *x = ¬if->u.calibration; | |
3612 | ||
3613 | if (notif->size == sizeof(*x)) { | |
3614 | memcpy(&priv->calib, x, sizeof(*x)); | |
3615 | IPW_DEBUG_INFO("TODO: Calibration\n"); | |
3616 | break; | |
3617 | } | |
3618 | ||
3619 | IPW_ERROR("Calibration of wrong size %d (should be %d)\n", | |
3620 | notif->size,sizeof(*x)); | |
3621 | break; | |
3622 | } | |
3623 | ||
3624 | case HOST_NOTIFICATION_NOISE_STATS: { | |
3625 | if (notif->size == sizeof(u32)) { | |
3626 | priv->last_noise = (u8)(notif->u.noise.value & 0xff); | |
3627 | average_add(&priv->average_noise, priv->last_noise); | |
3628 | break; | |
3629 | } | |
3630 | ||
3631 | IPW_ERROR("Noise stat is wrong size %d (should be %d)\n", | |
3632 | notif->size, sizeof(u32)); | |
3633 | break; | |
3634 | } | |
3635 | ||
3636 | default: | |
3637 | IPW_ERROR("Unknown notification: " | |
3638 | "subtype=%d,flags=0x%2x,size=%d\n", | |
3639 | notif->subtype, notif->flags, notif->size); | |
3640 | } | |
3641 | } | |
3642 | ||
3643 | /** | |
3644 | * Destroys all DMA structures and initialise them again | |
3645 | * | |
3646 | * @param priv | |
3647 | * @return error code | |
3648 | */ | |
3649 | static int ipw_queue_reset(struct ipw_priv *priv) | |
3650 | { | |
3651 | int rc = 0; | |
3652 | /** @todo customize queue sizes */ | |
3653 | int nTx = 64, nTxCmd = 8; | |
3654 | ipw_tx_queue_free(priv); | |
3655 | /* Tx CMD queue */ | |
3656 | rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd, | |
3657 | CX2_TX_CMD_QUEUE_READ_INDEX, | |
3658 | CX2_TX_CMD_QUEUE_WRITE_INDEX, | |
3659 | CX2_TX_CMD_QUEUE_BD_BASE, | |
3660 | CX2_TX_CMD_QUEUE_BD_SIZE); | |
3661 | if (rc) { | |
3662 | IPW_ERROR("Tx Cmd queue init failed\n"); | |
3663 | goto error; | |
3664 | } | |
3665 | /* Tx queue(s) */ | |
3666 | rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx, | |
3667 | CX2_TX_QUEUE_0_READ_INDEX, | |
3668 | CX2_TX_QUEUE_0_WRITE_INDEX, | |
3669 | CX2_TX_QUEUE_0_BD_BASE, | |
3670 | CX2_TX_QUEUE_0_BD_SIZE); | |
3671 | if (rc) { | |
3672 | IPW_ERROR("Tx 0 queue init failed\n"); | |
3673 | goto error; | |
3674 | } | |
3675 | rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, | |
3676 | CX2_TX_QUEUE_1_READ_INDEX, | |
3677 | CX2_TX_QUEUE_1_WRITE_INDEX, | |
3678 | CX2_TX_QUEUE_1_BD_BASE, | |
3679 | CX2_TX_QUEUE_1_BD_SIZE); | |
3680 | if (rc) { | |
3681 | IPW_ERROR("Tx 1 queue init failed\n"); | |
3682 | goto error; | |
3683 | } | |
3684 | rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, | |
3685 | CX2_TX_QUEUE_2_READ_INDEX, | |
3686 | CX2_TX_QUEUE_2_WRITE_INDEX, | |
3687 | CX2_TX_QUEUE_2_BD_BASE, | |
3688 | CX2_TX_QUEUE_2_BD_SIZE); | |
3689 | if (rc) { | |
3690 | IPW_ERROR("Tx 2 queue init failed\n"); | |
3691 | goto error; | |
3692 | } | |
3693 | rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, | |
3694 | CX2_TX_QUEUE_3_READ_INDEX, | |
3695 | CX2_TX_QUEUE_3_WRITE_INDEX, | |
3696 | CX2_TX_QUEUE_3_BD_BASE, | |
3697 | CX2_TX_QUEUE_3_BD_SIZE); | |
3698 | if (rc) { | |
3699 | IPW_ERROR("Tx 3 queue init failed\n"); | |
3700 | goto error; | |
3701 | } | |
3702 | /* statistics */ | |
3703 | priv->rx_bufs_min = 0; | |
3704 | priv->rx_pend_max = 0; | |
3705 | return rc; | |
3706 | ||
3707 | error: | |
3708 | ipw_tx_queue_free(priv); | |
3709 | return rc; | |
3710 | } | |
3711 | ||
3712 | /** | |
3713 | * Reclaim Tx queue entries no more used by NIC. | |
3714 | * | |
3715 | * When FW adwances 'R' index, all entries between old and | |
3716 | * new 'R' index need to be reclaimed. As result, some free space | |
3717 | * forms. If there is enough free space (> low mark), wake Tx queue. | |
3718 | * | |
3719 | * @note Need to protect against garbage in 'R' index | |
3720 | * @param priv | |
3721 | * @param txq | |
3722 | * @param qindex | |
3723 | * @return Number of used entries remains in the queue | |
3724 | */ | |
3725 | static int ipw_queue_tx_reclaim(struct ipw_priv *priv, | |
3726 | struct clx2_tx_queue *txq, int qindex) | |
3727 | { | |
3728 | u32 hw_tail; | |
3729 | int used; | |
3730 | struct clx2_queue *q = &txq->q; | |
3731 | ||
3732 | hw_tail = ipw_read32(priv, q->reg_r); | |
3733 | if (hw_tail >= q->n_bd) { | |
3734 | IPW_ERROR | |
3735 | ("Read index for DMA queue (%d) is out of range [0-%d)\n", | |
3736 | hw_tail, q->n_bd); | |
3737 | goto done; | |
3738 | } | |
3739 | for (; q->last_used != hw_tail; | |
3740 | q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { | |
3741 | ipw_queue_tx_free_tfd(priv, txq); | |
3742 | priv->tx_packets++; | |
3743 | } | |
3744 | done: | |
3745 | if (ipw_queue_space(q) > q->low_mark && qindex >= 0) { | |
3746 | __maybe_wake_tx(priv); | |
3747 | } | |
3748 | used = q->first_empty - q->last_used; | |
3749 | if (used < 0) | |
3750 | used += q->n_bd; | |
3751 | ||
3752 | return used; | |
3753 | } | |
3754 | ||
3755 | static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, | |
3756 | int len, int sync) | |
3757 | { | |
3758 | struct clx2_tx_queue *txq = &priv->txq_cmd; | |
3759 | struct clx2_queue *q = &txq->q; | |
3760 | struct tfd_frame *tfd; | |
3761 | ||
3762 | if (ipw_queue_space(q) < (sync ? 1 : 2)) { | |
3763 | IPW_ERROR("No space for Tx\n"); | |
3764 | return -EBUSY; | |
3765 | } | |
3766 | ||
3767 | tfd = &txq->bd[q->first_empty]; | |
3768 | txq->txb[q->first_empty] = NULL; | |
3769 | ||
3770 | memset(tfd, 0, sizeof(*tfd)); | |
3771 | tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE; | |
3772 | tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; | |
3773 | priv->hcmd_seq++; | |
3774 | tfd->u.cmd.index = hcmd; | |
3775 | tfd->u.cmd.length = len; | |
3776 | memcpy(tfd->u.cmd.payload, buf, len); | |
3777 | q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); | |
3778 | ipw_write32(priv, q->reg_w, q->first_empty); | |
3779 | _ipw_read32(priv, 0x90); | |
3780 | ||
3781 | return 0; | |
3782 | } | |
3783 | ||
3784 | ||
3785 | ||
3786 | /* | |
3787 | * Rx theory of operation | |
3788 | * | |
3789 | * The host allocates 32 DMA target addresses and passes the host address | |
3790 | * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is | |
3791 | * 0 to 31 | |
3792 | * | |
3793 | * Rx Queue Indexes | |
3794 | * The host/firmware share two index registers for managing the Rx buffers. | |
3795 | * | |
3796 | * The READ index maps to the first position that the firmware may be writing | |
3797 | * to -- the driver can read up to (but not including) this position and get | |
3798 | * good data. | |
3799 | * The READ index is managed by the firmware once the card is enabled. | |
3800 | * | |
3801 | * The WRITE index maps to the last position the driver has read from -- the | |
3802 | * position preceding WRITE is the last slot the firmware can place a packet. | |
3803 | * | |
3804 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | |
3805 | * WRITE = READ. | |
3806 | * | |
3807 | * During initialization the host sets up the READ queue position to the first | |
3808 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | |
3809 | * | |
3810 | * When the firmware places a packet in a buffer it will advance the READ index | |
3811 | * and fire the RX interrupt. The driver can then query the READ index and | |
3812 | * process as many packets as possible, moving the WRITE index forward as it | |
3813 | * resets the Rx queue buffers with new memory. | |
3814 | * | |
3815 | * The management in the driver is as follows: | |
3816 | * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When | |
3817 | * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | |
3818 | * to replensish the ipw->rxq->rx_free. | |
3819 | * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the | |
3820 | * ipw->rxq is replenished and the READ INDEX is updated (updating the | |
3821 | * 'processed' and 'read' driver indexes as well) | |
3822 | * + A received packet is processed and handed to the kernel network stack, | |
3823 | * detached from the ipw->rxq. The driver 'processed' index is updated. | |
3824 | * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free | |
3825 | * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ | |
3826 | * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there | |
3827 | * were enough free buffers and RX_STALLED is set it is cleared. | |
3828 | * | |
3829 | * | |
3830 | * Driver sequence: | |
3831 | * | |
3832 | * ipw_rx_queue_alloc() Allocates rx_free | |
3833 | * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls | |
3834 | * ipw_rx_queue_restock | |
3835 | * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx | |
3836 | * queue, updates firmware pointers, and updates | |
3837 | * the WRITE index. If insufficient rx_free buffers | |
3838 | * are available, schedules ipw_rx_queue_replenish | |
3839 | * | |
3840 | * -- enable interrupts -- | |
3841 | * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the | |
3842 | * READ INDEX, detaching the SKB from the pool. | |
3843 | * Moves the packet buffer from queue to rx_used. | |
3844 | * Calls ipw_rx_queue_restock to refill any empty | |
3845 | * slots. | |
3846 | * ... | |
3847 | * | |
3848 | */ | |
3849 | ||
3850 | /* | |
3851 | * If there are slots in the RX queue that need to be restocked, | |
3852 | * and we have free pre-allocated buffers, fill the ranks as much | |
3853 | * as we can pulling from rx_free. | |
3854 | * | |
3855 | * This moves the 'write' index forward to catch up with 'processed', and | |
3856 | * also updates the memory address in the firmware to reference the new | |
3857 | * target buffer. | |
3858 | */ | |
3859 | static void ipw_rx_queue_restock(struct ipw_priv *priv) | |
3860 | { | |
3861 | struct ipw_rx_queue *rxq = priv->rxq; | |
3862 | struct list_head *element; | |
3863 | struct ipw_rx_mem_buffer *rxb; | |
3864 | unsigned long flags; | |
3865 | int write; | |
3866 | ||
3867 | spin_lock_irqsave(&rxq->lock, flags); | |
3868 | write = rxq->write; | |
3869 | while ((rxq->write != rxq->processed) && (rxq->free_count)) { | |
3870 | element = rxq->rx_free.next; | |
3871 | rxb = list_entry(element, struct ipw_rx_mem_buffer, list); | |
3872 | list_del(element); | |
3873 | ||
3874 | ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, | |
3875 | rxb->dma_addr); | |
3876 | rxq->queue[rxq->write] = rxb; | |
3877 | rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; | |
3878 | rxq->free_count--; | |
3879 | } | |
3880 | spin_unlock_irqrestore(&rxq->lock, flags); | |
3881 | ||
3882 | /* If the pre-allocated buffer pool is dropping low, schedule to | |
3883 | * refill it */ | |
3884 | if (rxq->free_count <= RX_LOW_WATERMARK) | |
3885 | queue_work(priv->workqueue, &priv->rx_replenish); | |
3886 | ||
3887 | /* If we've added more space for the firmware to place data, tell it */ | |
3888 | if (write != rxq->write) | |
3889 | ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write); | |
3890 | } | |
3891 | ||
3892 | /* | |
3893 | * Move all used packet from rx_used to rx_free, allocating a new SKB for each. | |
3894 | * Also restock the Rx queue via ipw_rx_queue_restock. | |
3895 | * | |
3896 | * This is called as a scheduled work item (except for during intialization) | |
3897 | */ | |
3898 | static void ipw_rx_queue_replenish(void *data) | |
3899 | { | |
3900 | struct ipw_priv *priv = data; | |
3901 | struct ipw_rx_queue *rxq = priv->rxq; | |
3902 | struct list_head *element; | |
3903 | struct ipw_rx_mem_buffer *rxb; | |
3904 | unsigned long flags; | |
3905 | ||
3906 | spin_lock_irqsave(&rxq->lock, flags); | |
3907 | while (!list_empty(&rxq->rx_used)) { | |
3908 | element = rxq->rx_used.next; | |
3909 | rxb = list_entry(element, struct ipw_rx_mem_buffer, list); | |
3910 | rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC); | |
3911 | if (!rxb->skb) { | |
3912 | printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n", | |
3913 | priv->net_dev->name); | |
3914 | /* We don't reschedule replenish work here -- we will | |
3915 | * call the restock method and if it still needs | |
3916 | * more buffers it will schedule replenish */ | |
3917 | break; | |
3918 | } | |
3919 | list_del(element); | |
3920 | ||
3921 | rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data; | |
3922 | rxb->dma_addr = pci_map_single( | |
3923 | priv->pci_dev, rxb->skb->data, CX2_RX_BUF_SIZE, | |
3924 | PCI_DMA_FROMDEVICE); | |
3925 | ||
3926 | list_add_tail(&rxb->list, &rxq->rx_free); | |
3927 | rxq->free_count++; | |
3928 | } | |
3929 | spin_unlock_irqrestore(&rxq->lock, flags); | |
3930 | ||
3931 | ipw_rx_queue_restock(priv); | |
3932 | } | |
3933 | ||
3934 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | |
3935 | * If an SKB has been detached, the POOL needs to have it's SKB set to NULL | |
3936 | * This free routine walks the list of POOL entries and if SKB is set to | |
3937 | * non NULL it is unmapped and freed | |
3938 | */ | |
3939 | static void ipw_rx_queue_free(struct ipw_priv *priv, | |
3940 | struct ipw_rx_queue *rxq) | |
3941 | { | |
3942 | int i; | |
3943 | ||
3944 | if (!rxq) | |
3945 | return; | |
3946 | ||
3947 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | |
3948 | if (rxq->pool[i].skb != NULL) { | |
3949 | pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, | |
3950 | CX2_RX_BUF_SIZE, | |
3951 | PCI_DMA_FROMDEVICE); | |
3952 | dev_kfree_skb(rxq->pool[i].skb); | |
3953 | } | |
3954 | } | |
3955 | ||
3956 | kfree(rxq); | |
3957 | } | |
3958 | ||
3959 | static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv) | |
3960 | { | |
3961 | struct ipw_rx_queue *rxq; | |
3962 | int i; | |
3963 | ||
3964 | rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL); | |
3965 | memset(rxq, 0, sizeof(*rxq)); | |
3966 | spin_lock_init(&rxq->lock); | |
3967 | INIT_LIST_HEAD(&rxq->rx_free); | |
3968 | INIT_LIST_HEAD(&rxq->rx_used); | |
3969 | ||
3970 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | |
3971 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) | |
3972 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | |
3973 | ||
3974 | /* Set us so that we have processed and used all buffers, but have | |
3975 | * not restocked the Rx queue with fresh buffers */ | |
3976 | rxq->read = rxq->write = 0; | |
3977 | rxq->processed = RX_QUEUE_SIZE - 1; | |
3978 | rxq->free_count = 0; | |
3979 | ||
3980 | return rxq; | |
3981 | } | |
3982 | ||
3983 | static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate) | |
3984 | { | |
3985 | rate &= ~IEEE80211_BASIC_RATE_MASK; | |
3986 | if (ieee_mode == IEEE_A) { | |
3987 | switch (rate) { | |
3988 | case IEEE80211_OFDM_RATE_6MB: | |
3989 | return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? | |
3990 | 1 : 0; | |
3991 | case IEEE80211_OFDM_RATE_9MB: | |
3992 | return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? | |
3993 | 1 : 0; | |
3994 | case IEEE80211_OFDM_RATE_12MB: | |
3995 | return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? | |
3996 | 1 : 0; | |
3997 | case IEEE80211_OFDM_RATE_18MB: | |
3998 | return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? | |
3999 | 1 : 0; | |
4000 | case IEEE80211_OFDM_RATE_24MB: | |
4001 | return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? | |
4002 | 1 : 0; | |
4003 | case IEEE80211_OFDM_RATE_36MB: | |
4004 | return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? | |
4005 | 1 : 0; | |
4006 | case IEEE80211_OFDM_RATE_48MB: | |
4007 | return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? | |
4008 | 1 : 0; | |
4009 | case IEEE80211_OFDM_RATE_54MB: | |
4010 | return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? | |
4011 | 1 : 0; | |
4012 | default: | |
4013 | return 0; | |
4014 | } | |
4015 | } | |
4016 | ||
4017 | /* B and G mixed */ | |
4018 | switch (rate) { | |
4019 | case IEEE80211_CCK_RATE_1MB: | |
4020 | return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0; | |
4021 | case IEEE80211_CCK_RATE_2MB: | |
4022 | return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0; | |
4023 | case IEEE80211_CCK_RATE_5MB: | |
4024 | return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0; | |
4025 | case IEEE80211_CCK_RATE_11MB: | |
4026 | return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0; | |
4027 | } | |
4028 | ||
4029 | /* If we are limited to B modulations, bail at this point */ | |
4030 | if (ieee_mode == IEEE_B) | |
4031 | return 0; | |
4032 | ||
4033 | /* G */ | |
4034 | switch (rate) { | |
4035 | case IEEE80211_OFDM_RATE_6MB: | |
4036 | return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0; | |
4037 | case IEEE80211_OFDM_RATE_9MB: | |
4038 | return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0; | |
4039 | case IEEE80211_OFDM_RATE_12MB: | |
4040 | return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0; | |
4041 | case IEEE80211_OFDM_RATE_18MB: | |
4042 | return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0; | |
4043 | case IEEE80211_OFDM_RATE_24MB: | |
4044 | return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0; | |
4045 | case IEEE80211_OFDM_RATE_36MB: | |
4046 | return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0; | |
4047 | case IEEE80211_OFDM_RATE_48MB: | |
4048 | return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0; | |
4049 | case IEEE80211_OFDM_RATE_54MB: | |
4050 | return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0; | |
4051 | } | |
4052 | ||
4053 | return 0; | |
4054 | } | |
4055 | ||
4056 | static int ipw_compatible_rates(struct ipw_priv *priv, | |
4057 | const struct ieee80211_network *network, | |
4058 | struct ipw_supported_rates *rates) | |
4059 | { | |
4060 | int num_rates, i; | |
4061 | ||
4062 | memset(rates, 0, sizeof(*rates)); | |
4063 | num_rates = min(network->rates_len, (u8)IPW_MAX_RATES); | |
4064 | rates->num_rates = 0; | |
4065 | for (i = 0; i < num_rates; i++) { | |
4066 | if (!ipw_is_rate_in_mask(priv, network->mode, network->rates[i])) { | |
4067 | IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", | |
4068 | network->rates[i], priv->rates_mask); | |
4069 | continue; | |
4070 | } | |
4071 | ||
4072 | rates->supported_rates[rates->num_rates++] = network->rates[i]; | |
4073 | } | |
4074 | ||
4075 | num_rates = min(network->rates_ex_len, (u8)(IPW_MAX_RATES - num_rates)); | |
4076 | for (i = 0; i < num_rates; i++) { | |
4077 | if (!ipw_is_rate_in_mask(priv, network->mode, network->rates_ex[i])) { | |
4078 | IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", | |
4079 | network->rates_ex[i], priv->rates_mask); | |
4080 | continue; | |
4081 | } | |
4082 | ||
4083 | rates->supported_rates[rates->num_rates++] = network->rates_ex[i]; | |
4084 | } | |
4085 | ||
4086 | return rates->num_rates; | |
4087 | } | |
4088 | ||
4089 | static inline void ipw_copy_rates(struct ipw_supported_rates *dest, | |
4090 | const struct ipw_supported_rates *src) | |
4091 | { | |
4092 | u8 i; | |
4093 | for (i = 0; i < src->num_rates; i++) | |
4094 | dest->supported_rates[i] = src->supported_rates[i]; | |
4095 | dest->num_rates = src->num_rates; | |
4096 | } | |
4097 | ||
4098 | /* TODO: Look at sniffed packets in the air to determine if the basic rate | |
4099 | * mask should ever be used -- right now all callers to add the scan rates are | |
4100 | * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ | |
4101 | static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, | |
4102 | u8 modulation, u32 rate_mask) | |
4103 | { | |
4104 | u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ? | |
4105 | IEEE80211_BASIC_RATE_MASK : 0; | |
4106 | ||
4107 | if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK) | |
4108 | rates->supported_rates[rates->num_rates++] = | |
4109 | IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; | |
4110 | ||
4111 | if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK) | |
4112 | rates->supported_rates[rates->num_rates++] = | |
4113 | IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; | |
4114 | ||
4115 | if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK) | |
4116 | rates->supported_rates[rates->num_rates++] = basic_mask | | |
4117 | IEEE80211_CCK_RATE_5MB; | |
4118 | ||
4119 | if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK) | |
4120 | rates->supported_rates[rates->num_rates++] = basic_mask | | |
4121 | IEEE80211_CCK_RATE_11MB; | |
4122 | } | |
4123 | ||
4124 | static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, | |
4125 | u8 modulation, u32 rate_mask) | |
4126 | { | |
4127 | u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ? | |
4128 | IEEE80211_BASIC_RATE_MASK : 0; | |
4129 | ||
4130 | if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK) | |
4131 | rates->supported_rates[rates->num_rates++] = basic_mask | | |
4132 | IEEE80211_OFDM_RATE_6MB; | |
4133 | ||
4134 | if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK) | |
4135 | rates->supported_rates[rates->num_rates++] = | |
4136 | IEEE80211_OFDM_RATE_9MB; | |
4137 | ||
4138 | if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK) | |
4139 | rates->supported_rates[rates->num_rates++] = basic_mask | | |
4140 | IEEE80211_OFDM_RATE_12MB; | |
4141 | ||
4142 | if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK) | |
4143 | rates->supported_rates[rates->num_rates++] = | |
4144 | IEEE80211_OFDM_RATE_18MB; | |
4145 | ||
4146 | if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK) | |
4147 | rates->supported_rates[rates->num_rates++] = basic_mask | | |
4148 | IEEE80211_OFDM_RATE_24MB; | |
4149 | ||
4150 | if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK) | |
4151 | rates->supported_rates[rates->num_rates++] = | |
4152 | IEEE80211_OFDM_RATE_36MB; | |
4153 | ||
4154 | if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK) | |
4155 | rates->supported_rates[rates->num_rates++] = | |
4156 | IEEE80211_OFDM_RATE_48MB; | |
4157 | ||
4158 | if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK) | |
4159 | rates->supported_rates[rates->num_rates++] = | |
4160 | IEEE80211_OFDM_RATE_54MB; | |
4161 | } | |
4162 | ||
4163 | struct ipw_network_match { | |
4164 | struct ieee80211_network *network; | |
4165 | struct ipw_supported_rates rates; | |
4166 | }; | |
4167 | ||
4168 | static int ipw_best_network( | |
4169 | struct ipw_priv *priv, | |
4170 | struct ipw_network_match *match, | |
4171 | struct ieee80211_network *network, | |
4172 | int roaming) | |
4173 | { | |
4174 | struct ipw_supported_rates rates; | |
4175 | ||
4176 | /* Verify that this network's capability is compatible with the | |
4177 | * current mode (AdHoc or Infrastructure) */ | |
4178 | if ((priv->ieee->iw_mode == IW_MODE_INFRA && | |
4179 | !(network->capability & WLAN_CAPABILITY_BSS)) || | |
4180 | (priv->ieee->iw_mode == IW_MODE_ADHOC && | |
4181 | !(network->capability & WLAN_CAPABILITY_IBSS))) { | |
4182 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to " | |
4183 | "capability mismatch.\n", | |
4184 | escape_essid(network->ssid, network->ssid_len), | |
4185 | MAC_ARG(network->bssid)); | |
4186 | return 0; | |
4187 | } | |
4188 | ||
4189 | /* If we do not have an ESSID for this AP, we can not associate with | |
4190 | * it */ | |
4191 | if (network->flags & NETWORK_EMPTY_ESSID) { | |
4192 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4193 | "because of hidden ESSID.\n", | |
4194 | escape_essid(network->ssid, network->ssid_len), | |
4195 | MAC_ARG(network->bssid)); | |
4196 | return 0; | |
4197 | } | |
4198 | ||
4199 | if (unlikely(roaming)) { | |
4200 | /* If we are roaming, then ensure check if this is a valid | |
4201 | * network to try and roam to */ | |
4202 | if ((network->ssid_len != match->network->ssid_len) || | |
4203 | memcmp(network->ssid, match->network->ssid, | |
4204 | network->ssid_len)) { | |
4205 | IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded " | |
4206 | "because of non-network ESSID.\n", | |
4207 | escape_essid(network->ssid, | |
4208 | network->ssid_len), | |
4209 | MAC_ARG(network->bssid)); | |
4210 | return 0; | |
4211 | } | |
4212 | } else { | |
4213 | /* If an ESSID has been configured then compare the broadcast | |
4214 | * ESSID to ours */ | |
4215 | if ((priv->config & CFG_STATIC_ESSID) && | |
4216 | ((network->ssid_len != priv->essid_len) || | |
4217 | memcmp(network->ssid, priv->essid, | |
4218 | min(network->ssid_len, priv->essid_len)))) { | |
4219 | char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; | |
4220 | strncpy(escaped, escape_essid( | |
4221 | network->ssid, network->ssid_len), | |
4222 | sizeof(escaped)); | |
4223 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4224 | "because of ESSID mismatch: '%s'.\n", | |
4225 | escaped, MAC_ARG(network->bssid), | |
4226 | escape_essid(priv->essid, priv->essid_len)); | |
4227 | return 0; | |
4228 | } | |
4229 | } | |
4230 | ||
4231 | /* If the old network rate is better than this one, don't bother | |
4232 | * testing everything else. */ | |
4233 | if (match->network && match->network->stats.rssi > | |
4234 | network->stats.rssi) { | |
4235 | char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; | |
4236 | strncpy(escaped, | |
4237 | escape_essid(network->ssid, network->ssid_len), | |
4238 | sizeof(escaped)); | |
4239 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because " | |
4240 | "'%s (" MAC_FMT ")' has a stronger signal.\n", | |
4241 | escaped, MAC_ARG(network->bssid), | |
4242 | escape_essid(match->network->ssid, | |
4243 | match->network->ssid_len), | |
4244 | MAC_ARG(match->network->bssid)); | |
4245 | return 0; | |
4246 | } | |
4247 | ||
4248 | /* If this network has already had an association attempt within the | |
4249 | * last 3 seconds, do not try and associate again... */ | |
4250 | if (network->last_associate && | |
4251 | time_after(network->last_associate + (HZ * 5UL), jiffies)) { | |
4252 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4253 | "because of storming (%lu since last " | |
4254 | "assoc attempt).\n", | |
4255 | escape_essid(network->ssid, network->ssid_len), | |
4256 | MAC_ARG(network->bssid), | |
4257 | (jiffies - network->last_associate) / HZ); | |
4258 | return 0; | |
4259 | } | |
4260 | ||
4261 | /* Now go through and see if the requested network is valid... */ | |
4262 | if (priv->ieee->scan_age != 0 && | |
4263 | jiffies - network->last_scanned > priv->ieee->scan_age) { | |
4264 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4265 | "because of age: %lums.\n", | |
4266 | escape_essid(network->ssid, network->ssid_len), | |
4267 | MAC_ARG(network->bssid), | |
4268 | (jiffies - network->last_scanned) / (HZ / 100)); | |
4269 | return 0; | |
4270 | } | |
4271 | ||
4272 | if ((priv->config & CFG_STATIC_CHANNEL) && | |
4273 | (network->channel != priv->channel)) { | |
4274 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4275 | "because of channel mismatch: %d != %d.\n", | |
4276 | escape_essid(network->ssid, network->ssid_len), | |
4277 | MAC_ARG(network->bssid), | |
4278 | network->channel, priv->channel); | |
4279 | return 0; | |
4280 | } | |
4281 | ||
4282 | /* Verify privacy compatability */ | |
4283 | if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != | |
4284 | ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { | |
4285 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4286 | "because of privacy mismatch: %s != %s.\n", | |
4287 | escape_essid(network->ssid, network->ssid_len), | |
4288 | MAC_ARG(network->bssid), | |
4289 | priv->capability & CAP_PRIVACY_ON ? "on" : | |
4290 | "off", | |
4291 | network->capability & | |
4292 | WLAN_CAPABILITY_PRIVACY ?"on" : "off"); | |
4293 | return 0; | |
4294 | } | |
4295 | ||
4296 | if ((priv->config & CFG_STATIC_BSSID) && | |
4297 | memcmp(network->bssid, priv->bssid, ETH_ALEN)) { | |
4298 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4299 | "because of BSSID mismatch: " MAC_FMT ".\n", | |
4300 | escape_essid(network->ssid, network->ssid_len), | |
4301 | MAC_ARG(network->bssid), | |
4302 | MAC_ARG(priv->bssid)); | |
4303 | return 0; | |
4304 | } | |
4305 | ||
4306 | /* Filter out any incompatible freq / mode combinations */ | |
4307 | if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { | |
4308 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4309 | "because of invalid frequency/mode " | |
4310 | "combination.\n", | |
4311 | escape_essid(network->ssid, network->ssid_len), | |
4312 | MAC_ARG(network->bssid)); | |
4313 | return 0; | |
4314 | } | |
4315 | ||
4316 | ipw_compatible_rates(priv, network, &rates); | |
4317 | if (rates.num_rates == 0) { | |
4318 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " | |
4319 | "because of no compatible rates.\n", | |
4320 | escape_essid(network->ssid, network->ssid_len), | |
4321 | MAC_ARG(network->bssid)); | |
4322 | return 0; | |
4323 | } | |
4324 | ||
4325 | /* TODO: Perform any further minimal comparititive tests. We do not | |
4326 | * want to put too much policy logic here; intelligent scan selection | |
4327 | * should occur within a generic IEEE 802.11 user space tool. */ | |
4328 | ||
4329 | /* Set up 'new' AP to this network */ | |
4330 | ipw_copy_rates(&match->rates, &rates); | |
4331 | match->network = network; | |
4332 | ||
4333 | IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n", | |
4334 | escape_essid(network->ssid, network->ssid_len), | |
4335 | MAC_ARG(network->bssid)); | |
4336 | ||
4337 | return 1; | |
4338 | } | |
4339 | ||
4340 | ||
4341 | static void ipw_adhoc_create(struct ipw_priv *priv, | |
4342 | struct ieee80211_network *network) | |
4343 | { | |
4344 | /* | |
4345 | * For the purposes of scanning, we can set our wireless mode | |
4346 | * to trigger scans across combinations of bands, but when it | |
4347 | * comes to creating a new ad-hoc network, we have tell the FW | |
4348 | * exactly which band to use. | |
4349 | * | |
4350 | * We also have the possibility of an invalid channel for the | |
4351 | * chossen band. Attempting to create a new ad-hoc network | |
4352 | * with an invalid channel for wireless mode will trigger a | |
4353 | * FW fatal error. | |
4354 | */ | |
4355 | network->mode = is_valid_channel(priv->ieee->mode, priv->channel); | |
4356 | if (network->mode) { | |
4357 | network->channel = priv->channel; | |
4358 | } else { | |
4359 | IPW_WARNING("Overriding invalid channel\n"); | |
4360 | if (priv->ieee->mode & IEEE_A) { | |
4361 | network->mode = IEEE_A; | |
4362 | priv->channel = band_a_active_channel[0]; | |
4363 | } else if (priv->ieee->mode & IEEE_G) { | |
4364 | network->mode = IEEE_G; | |
4365 | priv->channel = band_b_active_channel[0]; | |
4366 | } else { | |
4367 | network->mode = IEEE_B; | |
4368 | priv->channel = band_b_active_channel[0]; | |
4369 | } | |
4370 | } | |
4371 | ||
4372 | network->channel = priv->channel; | |
4373 | priv->config |= CFG_ADHOC_PERSIST; | |
4374 | ipw_create_bssid(priv, network->bssid); | |
4375 | network->ssid_len = priv->essid_len; | |
4376 | memcpy(network->ssid, priv->essid, priv->essid_len); | |
4377 | memset(&network->stats, 0, sizeof(network->stats)); | |
4378 | network->capability = WLAN_CAPABILITY_IBSS; | |
4379 | if (priv->capability & CAP_PRIVACY_ON) | |
4380 | network->capability |= WLAN_CAPABILITY_PRIVACY; | |
4381 | network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); | |
4382 | memcpy(network->rates, priv->rates.supported_rates, | |
4383 | network->rates_len); | |
4384 | network->rates_ex_len = priv->rates.num_rates - network->rates_len; | |
4385 | memcpy(network->rates_ex, | |
4386 | &priv->rates.supported_rates[network->rates_len], | |
4387 | network->rates_ex_len); | |
4388 | network->last_scanned = 0; | |
4389 | network->flags = 0; | |
4390 | network->last_associate = 0; | |
4391 | network->time_stamp[0] = 0; | |
4392 | network->time_stamp[1] = 0; | |
4393 | network->beacon_interval = 100; /* Default */ | |
4394 | network->listen_interval = 10; /* Default */ | |
4395 | network->atim_window = 0; /* Default */ | |
4396 | #ifdef CONFIG_IEEE80211_WPA | |
4397 | network->wpa_ie_len = 0; | |
4398 | network->rsn_ie_len = 0; | |
4399 | #endif /* CONFIG_IEEE80211_WPA */ | |
4400 | } | |
4401 | ||
4402 | static void ipw_send_wep_keys(struct ipw_priv *priv) | |
4403 | { | |
4404 | struct ipw_wep_key *key; | |
4405 | int i; | |
4406 | struct host_cmd cmd = { | |
4407 | .cmd = IPW_CMD_WEP_KEY, | |
4408 | .len = sizeof(*key) | |
4409 | }; | |
4410 | ||
4411 | key = (struct ipw_wep_key *)&cmd.param; | |
4412 | key->cmd_id = DINO_CMD_WEP_KEY; | |
4413 | key->seq_num = 0; | |
4414 | ||
4415 | for (i = 0; i < 4; i++) { | |
4416 | key->key_index = i; | |
4417 | if (!(priv->sec.flags & (1 << i))) { | |
4418 | key->key_size = 0; | |
4419 | } else { | |
4420 | key->key_size = priv->sec.key_sizes[i]; | |
4421 | memcpy(key->key, priv->sec.keys[i], key->key_size); | |
4422 | } | |
4423 | ||
4424 | if (ipw_send_cmd(priv, &cmd)) { | |
4425 | IPW_ERROR("failed to send WEP_KEY command\n"); | |
4426 | return; | |
4427 | } | |
4428 | } | |
4429 | } | |
4430 | ||
4431 | static void ipw_adhoc_check(void *data) | |
4432 | { | |
4433 | struct ipw_priv *priv = data; | |
4434 | ||
4435 | if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold && | |
4436 | !(priv->config & CFG_ADHOC_PERSIST)) { | |
4437 | IPW_DEBUG_SCAN("Disassociating due to missed beacons\n"); | |
4438 | ipw_remove_current_network(priv); | |
4439 | ipw_disassociate(priv); | |
4440 | return; | |
4441 | } | |
4442 | ||
4443 | queue_delayed_work(priv->workqueue, &priv->adhoc_check, | |
4444 | priv->assoc_request.beacon_interval); | |
4445 | } | |
4446 | ||
4447 | #ifdef CONFIG_IPW_DEBUG | |
4448 | static void ipw_debug_config(struct ipw_priv *priv) | |
4449 | { | |
4450 | IPW_DEBUG_INFO("Scan completed, no valid APs matched " | |
4451 | "[CFG 0x%08X]\n", priv->config); | |
4452 | if (priv->config & CFG_STATIC_CHANNEL) | |
4453 | IPW_DEBUG_INFO("Channel locked to %d\n", | |
4454 | priv->channel); | |
4455 | else | |
4456 | IPW_DEBUG_INFO("Channel unlocked.\n"); | |
4457 | if (priv->config & CFG_STATIC_ESSID) | |
4458 | IPW_DEBUG_INFO("ESSID locked to '%s'\n", | |
4459 | escape_essid(priv->essid, | |
4460 | priv->essid_len)); | |
4461 | else | |
4462 | IPW_DEBUG_INFO("ESSID unlocked.\n"); | |
4463 | if (priv->config & CFG_STATIC_BSSID) | |
4464 | IPW_DEBUG_INFO("BSSID locked to %d\n", priv->channel); | |
4465 | else | |
4466 | IPW_DEBUG_INFO("BSSID unlocked.\n"); | |
4467 | if (priv->capability & CAP_PRIVACY_ON) | |
4468 | IPW_DEBUG_INFO("PRIVACY on\n"); | |
4469 | else | |
4470 | IPW_DEBUG_INFO("PRIVACY off\n"); | |
4471 | IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); | |
4472 | } | |
4473 | #else | |
4474 | #define ipw_debug_config(x) do {} while (0); | |
4475 | #endif | |
4476 | ||
4477 | static inline void ipw_set_fixed_rate(struct ipw_priv *priv, | |
4478 | struct ieee80211_network *network) | |
4479 | { | |
4480 | /* TODO: Verify that this works... */ | |
4481 | struct ipw_fixed_rate fr = { | |
4482 | .tx_rates = priv->rates_mask | |
4483 | }; | |
4484 | u32 reg; | |
4485 | u16 mask = 0; | |
4486 | ||
4487 | /* Identify 'current FW band' and match it with the fixed | |
4488 | * Tx rates */ | |
4489 | ||
4490 | switch (priv->ieee->freq_band) { | |
4491 | case IEEE80211_52GHZ_BAND: /* A only */ | |
4492 | /* IEEE_A */ | |
4493 | if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) { | |
4494 | /* Invalid fixed rate mask */ | |
4495 | fr.tx_rates = 0; | |
4496 | break; | |
4497 | } | |
4498 | ||
4499 | fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A; | |
4500 | break; | |
4501 | ||
4502 | default: /* 2.4Ghz or Mixed */ | |
4503 | /* IEEE_B */ | |
4504 | if (network->mode == IEEE_B) { | |
4505 | if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) { | |
4506 | /* Invalid fixed rate mask */ | |
4507 | fr.tx_rates = 0; | |
4508 | } | |
4509 | break; | |
4510 | } | |
4511 | ||
4512 | /* IEEE_G */ | |
4513 | if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK | | |
4514 | IEEE80211_OFDM_RATES_MASK)) { | |
4515 | /* Invalid fixed rate mask */ | |
4516 | fr.tx_rates = 0; | |
4517 | break; | |
4518 | } | |
4519 | ||
4520 | if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) { | |
4521 | mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1); | |
4522 | fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK; | |
4523 | } | |
4524 | ||
4525 | if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) { | |
4526 | mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1); | |
4527 | fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK; | |
4528 | } | |
4529 | ||
4530 | if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) { | |
4531 | mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1); | |
4532 | fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK; | |
4533 | } | |
4534 | ||
4535 | fr.tx_rates |= mask; | |
4536 | break; | |
4537 | } | |
4538 | ||
4539 | reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); | |
4540 | ipw_write_reg32(priv, reg, *(u32*)&fr); | |
4541 | } | |
4542 | ||
4543 | static int ipw_associate_network(struct ipw_priv *priv, | |
4544 | struct ieee80211_network *network, | |
4545 | struct ipw_supported_rates *rates, | |
4546 | int roaming) | |
4547 | { | |
4548 | int err; | |
4549 | ||
4550 | if (priv->config & CFG_FIXED_RATE) | |
4551 | ipw_set_fixed_rate(priv, network); | |
4552 | ||
4553 | if (!(priv->config & CFG_STATIC_ESSID)) { | |
4554 | priv->essid_len = min(network->ssid_len, | |
4555 | (u8)IW_ESSID_MAX_SIZE); | |
4556 | memcpy(priv->essid, network->ssid, priv->essid_len); | |
4557 | } | |
4558 | ||
4559 | network->last_associate = jiffies; | |
4560 | ||
4561 | memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); | |
4562 | priv->assoc_request.channel = network->channel; | |
4563 | if ((priv->capability & CAP_PRIVACY_ON) && | |
4564 | (priv->capability & CAP_SHARED_KEY)) { | |
4565 | priv->assoc_request.auth_type = AUTH_SHARED_KEY; | |
4566 | priv->assoc_request.auth_key = priv->sec.active_key; | |
4567 | } else { | |
4568 | priv->assoc_request.auth_type = AUTH_OPEN; | |
4569 | priv->assoc_request.auth_key = 0; | |
4570 | } | |
4571 | ||
4572 | if (priv->capability & CAP_PRIVACY_ON) | |
4573 | ipw_send_wep_keys(priv); | |
4574 | ||
4575 | /* | |
4576 | * It is valid for our ieee device to support multiple modes, but | |
4577 | * when it comes to associating to a given network we have to choose | |
4578 | * just one mode. | |
4579 | */ | |
4580 | if (network->mode & priv->ieee->mode & IEEE_A) | |
4581 | priv->assoc_request.ieee_mode = IPW_A_MODE; | |
4582 | else if (network->mode & priv->ieee->mode & IEEE_G) | |
4583 | priv->assoc_request.ieee_mode = IPW_G_MODE; | |
4584 | else if (network->mode & priv->ieee->mode & IEEE_B) | |
4585 | priv->assoc_request.ieee_mode = IPW_B_MODE; | |
4586 | ||
4587 | IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, " | |
4588 | "802.11%c [%d], enc=%s%s%s%c%c\n", | |
4589 | roaming ? "Rea" : "A", | |
4590 | escape_essid(priv->essid, priv->essid_len), | |
4591 | network->channel, | |
4592 | ipw_modes[priv->assoc_request.ieee_mode], | |
4593 | rates->num_rates, | |
4594 | priv->capability & CAP_PRIVACY_ON ? "on " : "off", | |
4595 | priv->capability & CAP_PRIVACY_ON ? | |
4596 | (priv->capability & CAP_SHARED_KEY ? "(shared)" : | |
4597 | "(open)") : "", | |
4598 | priv->capability & CAP_PRIVACY_ON ? " key=" : "", | |
4599 | priv->capability & CAP_PRIVACY_ON ? | |
4600 | '1' + priv->sec.active_key : '.', | |
4601 | priv->capability & CAP_PRIVACY_ON ? | |
4602 | '.' : ' '); | |
4603 | ||
4604 | priv->assoc_request.beacon_interval = network->beacon_interval; | |
4605 | if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && | |
4606 | (network->time_stamp[0] == 0) && | |
4607 | (network->time_stamp[1] == 0)) { | |
4608 | priv->assoc_request.assoc_type = HC_IBSS_START; | |
4609 | priv->assoc_request.assoc_tsf_msw = 0; | |
4610 | priv->assoc_request.assoc_tsf_lsw = 0; | |
4611 | } else { | |
4612 | if (unlikely(roaming)) | |
4613 | priv->assoc_request.assoc_type = HC_REASSOCIATE; | |
4614 | else | |
4615 | priv->assoc_request.assoc_type = HC_ASSOCIATE; | |
4616 | priv->assoc_request.assoc_tsf_msw = network->time_stamp[1]; | |
4617 | priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0]; | |
4618 | } | |
4619 | ||
4620 | memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN); | |
4621 | ||
4622 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { | |
4623 | memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); | |
4624 | priv->assoc_request.atim_window = network->atim_window; | |
4625 | } else { | |
4626 | memcpy(&priv->assoc_request.dest, network->bssid, | |
4627 | ETH_ALEN); | |
4628 | priv->assoc_request.atim_window = 0; | |
4629 | } | |
4630 | ||
4631 | priv->assoc_request.capability = network->capability; | |
4632 | priv->assoc_request.listen_interval = network->listen_interval; | |
4633 | ||
4634 | err = ipw_send_ssid(priv, priv->essid, priv->essid_len); | |
4635 | if (err) { | |
4636 | IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); | |
4637 | return err; | |
4638 | } | |
4639 | ||
4640 | rates->ieee_mode = priv->assoc_request.ieee_mode; | |
4641 | rates->purpose = IPW_RATE_CONNECT; | |
4642 | ipw_send_supported_rates(priv, rates); | |
4643 | ||
4644 | if (priv->assoc_request.ieee_mode == IPW_G_MODE) | |
4645 | priv->sys_config.dot11g_auto_detection = 1; | |
4646 | else | |
4647 | priv->sys_config.dot11g_auto_detection = 0; | |
4648 | err = ipw_send_system_config(priv, &priv->sys_config); | |
4649 | if (err) { | |
4650 | IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); | |
4651 | return err; | |
4652 | } | |
4653 | ||
4654 | IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi); | |
4655 | err = ipw_set_sensitivity(priv, network->stats.rssi); | |
4656 | if (err) { | |
4657 | IPW_DEBUG_HC("Attempt to send associate command failed.\n"); | |
4658 | return err; | |
4659 | } | |
4660 | ||
4661 | /* | |
4662 | * If preemption is enabled, it is possible for the association | |
4663 | * to complete before we return from ipw_send_associate. Therefore | |
4664 | * we have to be sure and update our priviate data first. | |
4665 | */ | |
4666 | priv->channel = network->channel; | |
4667 | memcpy(priv->bssid, network->bssid, ETH_ALEN); | |
4668 | priv->status |= STATUS_ASSOCIATING; | |
4669 | priv->status &= ~STATUS_SECURITY_UPDATED; | |
4670 | ||
4671 | priv->assoc_network = network; | |
4672 | ||
4673 | err = ipw_send_associate(priv, &priv->assoc_request); | |
4674 | if (err) { | |
4675 | IPW_DEBUG_HC("Attempt to send associate command failed.\n"); | |
4676 | return err; | |
4677 | } | |
4678 | ||
4679 | IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n", | |
4680 | escape_essid(priv->essid, priv->essid_len), | |
4681 | MAC_ARG(priv->bssid)); | |
4682 | ||
4683 | return 0; | |
4684 | } | |
4685 | ||
4686 | static void ipw_roam(void *data) | |
4687 | { | |
4688 | struct ipw_priv *priv = data; | |
4689 | struct ieee80211_network *network = NULL; | |
4690 | struct ipw_network_match match = { | |
4691 | .network = priv->assoc_network | |
4692 | }; | |
4693 | ||
4694 | /* The roaming process is as follows: | |
4695 | * | |
4696 | * 1. Missed beacon threshold triggers the roaming process by | |
4697 | * setting the status ROAM bit and requesting a scan. | |
4698 | * 2. When the scan completes, it schedules the ROAM work | |
4699 | * 3. The ROAM work looks at all of the known networks for one that | |
4700 | * is a better network than the currently associated. If none | |
4701 | * found, the ROAM process is over (ROAM bit cleared) | |
4702 | * 4. If a better network is found, a disassociation request is | |
4703 | * sent. | |
4704 | * 5. When the disassociation completes, the roam work is again | |
4705 | * scheduled. The second time through, the driver is no longer | |
4706 | * associated, and the newly selected network is sent an | |
4707 | * association request. | |
4708 | * 6. At this point ,the roaming process is complete and the ROAM | |
4709 | * status bit is cleared. | |
4710 | */ | |
4711 | ||
4712 | /* If we are no longer associated, and the roaming bit is no longer | |
4713 | * set, then we are not actively roaming, so just return */ | |
4714 | if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING))) | |
4715 | return; | |
4716 | ||
4717 | if (priv->status & STATUS_ASSOCIATED) { | |
4718 | /* First pass through ROAM process -- look for a better | |
4719 | * network */ | |
4720 | u8 rssi = priv->assoc_network->stats.rssi; | |
4721 | priv->assoc_network->stats.rssi = -128; | |
4722 | list_for_each_entry(network, &priv->ieee->network_list, list) { | |
4723 | if (network != priv->assoc_network) | |
4724 | ipw_best_network(priv, &match, network, 1); | |
4725 | } | |
4726 | priv->assoc_network->stats.rssi = rssi; | |
4727 | ||
4728 | if (match.network == priv->assoc_network) { | |
4729 | IPW_DEBUG_ASSOC("No better APs in this network to " | |
4730 | "roam to.\n"); | |
4731 | priv->status &= ~STATUS_ROAMING; | |
4732 | ipw_debug_config(priv); | |
4733 | return; | |
4734 | } | |
4735 | ||
4736 | ipw_send_disassociate(priv, 1); | |
4737 | priv->assoc_network = match.network; | |
4738 | ||
4739 | return; | |
4740 | } | |
4741 | ||
4742 | /* Second pass through ROAM process -- request association */ | |
4743 | ipw_compatible_rates(priv, priv->assoc_network, &match.rates); | |
4744 | ipw_associate_network(priv, priv->assoc_network, &match.rates, 1); | |
4745 | priv->status &= ~STATUS_ROAMING; | |
4746 | } | |
4747 | ||
4748 | static void ipw_associate(void *data) | |
4749 | { | |
4750 | struct ipw_priv *priv = data; | |
4751 | ||
4752 | struct ieee80211_network *network = NULL; | |
4753 | struct ipw_network_match match = { | |
4754 | .network = NULL | |
4755 | }; | |
4756 | struct ipw_supported_rates *rates; | |
4757 | struct list_head *element; | |
4758 | ||
4759 | if (!(priv->config & CFG_ASSOCIATE) && | |
4760 | !(priv->config & (CFG_STATIC_ESSID | | |
4761 | CFG_STATIC_CHANNEL | | |
4762 | CFG_STATIC_BSSID))) { | |
4763 | IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); | |
4764 | return; | |
4765 | } | |
4766 | ||
4767 | list_for_each_entry(network, &priv->ieee->network_list, list) | |
4768 | ipw_best_network(priv, &match, network, 0); | |
4769 | ||
4770 | network = match.network; | |
4771 | rates = &match.rates; | |
4772 | ||
4773 | if (network == NULL && | |
4774 | priv->ieee->iw_mode == IW_MODE_ADHOC && | |
4775 | priv->config & CFG_ADHOC_CREATE && | |
4776 | priv->config & CFG_STATIC_ESSID && | |
4777 | !list_empty(&priv->ieee->network_free_list)) { | |
4778 | element = priv->ieee->network_free_list.next; | |
4779 | network = list_entry(element, struct ieee80211_network, | |
4780 | list); | |
4781 | ipw_adhoc_create(priv, network); | |
4782 | rates = &priv->rates; | |
4783 | list_del(element); | |
4784 | list_add_tail(&network->list, &priv->ieee->network_list); | |
4785 | } | |
4786 | ||
4787 | /* If we reached the end of the list, then we don't have any valid | |
4788 | * matching APs */ | |
4789 | if (!network) { | |
4790 | ipw_debug_config(priv); | |
4791 | ||
4792 | queue_delayed_work(priv->workqueue, &priv->request_scan, | |
4793 | SCAN_INTERVAL); | |
4794 | ||
4795 | return; | |
4796 | } | |
4797 | ||
4798 | ipw_associate_network(priv, network, rates, 0); | |
4799 | } | |
4800 | ||
4801 | static inline void ipw_handle_data_packet(struct ipw_priv *priv, | |
4802 | struct ipw_rx_mem_buffer *rxb, | |
4803 | struct ieee80211_rx_stats *stats) | |
4804 | { | |
4805 | struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; | |
4806 | ||
4807 | /* We received data from the HW, so stop the watchdog */ | |
4808 | priv->net_dev->trans_start = jiffies; | |
4809 | ||
4810 | /* We only process data packets if the | |
4811 | * interface is open */ | |
4812 | if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) > | |
4813 | skb_tailroom(rxb->skb))) { | |
4814 | priv->ieee->stats.rx_errors++; | |
4815 | priv->wstats.discard.misc++; | |
4816 | IPW_DEBUG_DROP("Corruption detected! Oh no!\n"); | |
4817 | return; | |
4818 | } else if (unlikely(!netif_running(priv->net_dev))) { | |
4819 | priv->ieee->stats.rx_dropped++; | |
4820 | priv->wstats.discard.misc++; | |
4821 | IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); | |
4822 | return; | |
4823 | } | |
4824 | ||
4825 | /* Advance skb->data to the start of the actual payload */ | |
4826 | skb_reserve(rxb->skb, (u32)&pkt->u.frame.data[0] - (u32)pkt); | |
4827 | ||
4828 | /* Set the size of the skb to the size of the frame */ | |
4829 | skb_put(rxb->skb, pkt->u.frame.length); | |
4830 | ||
4831 | IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); | |
4832 | ||
4833 | if (!ieee80211_rx(priv->ieee, rxb->skb, stats)) | |
4834 | priv->ieee->stats.rx_errors++; | |
4835 | else /* ieee80211_rx succeeded, so it now owns the SKB */ | |
4836 | rxb->skb = NULL; | |
4837 | } | |
4838 | ||
4839 | ||
4840 | /* | |
4841 | * Main entry function for recieving a packet with 80211 headers. This | |
4842 | * should be called when ever the FW has notified us that there is a new | |
4843 | * skb in the recieve queue. | |
4844 | */ | |
4845 | static void ipw_rx(struct ipw_priv *priv) | |
4846 | { | |
4847 | struct ipw_rx_mem_buffer *rxb; | |
4848 | struct ipw_rx_packet *pkt; | |
4849 | struct ieee80211_hdr *header; | |
4850 | u32 r, w, i; | |
4851 | u8 network_packet; | |
4852 | ||
4853 | r = ipw_read32(priv, CX2_RX_READ_INDEX); | |
4854 | w = ipw_read32(priv, CX2_RX_WRITE_INDEX); | |
4855 | i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE; | |
4856 | ||
4857 | while (i != r) { | |
4858 | rxb = priv->rxq->queue[i]; | |
4859 | #ifdef CONFIG_IPW_DEBUG | |
4860 | if (unlikely(rxb == NULL)) { | |
4861 | printk(KERN_CRIT "Queue not allocated!\n"); | |
4862 | break; | |
4863 | } | |
4864 | #endif | |
4865 | priv->rxq->queue[i] = NULL; | |
4866 | ||
4867 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, | |
4868 | CX2_RX_BUF_SIZE, | |
4869 | PCI_DMA_FROMDEVICE); | |
4870 | ||
4871 | pkt = (struct ipw_rx_packet *)rxb->skb->data; | |
4872 | IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", | |
4873 | pkt->header.message_type, | |
4874 | pkt->header.rx_seq_num, | |
4875 | pkt->header.control_bits); | |
4876 | ||
4877 | switch (pkt->header.message_type) { | |
4878 | case RX_FRAME_TYPE: /* 802.11 frame */ { | |
4879 | struct ieee80211_rx_stats stats = { | |
4880 | .rssi = pkt->u.frame.rssi_dbm - | |
4881 | IPW_RSSI_TO_DBM, | |
4882 | .signal = pkt->u.frame.signal, | |
4883 | .rate = pkt->u.frame.rate, | |
4884 | .mac_time = jiffies, | |
4885 | .received_channel = | |
4886 | pkt->u.frame.received_channel, | |
4887 | .freq = (pkt->u.frame.control & (1<<0)) ? | |
4888 | IEEE80211_24GHZ_BAND : IEEE80211_52GHZ_BAND, | |
4889 | .len = pkt->u.frame.length, | |
4890 | }; | |
4891 | ||
4892 | if (stats.rssi != 0) | |
4893 | stats.mask |= IEEE80211_STATMASK_RSSI; | |
4894 | if (stats.signal != 0) | |
4895 | stats.mask |= IEEE80211_STATMASK_SIGNAL; | |
4896 | if (stats.rate != 0) | |
4897 | stats.mask |= IEEE80211_STATMASK_RATE; | |
4898 | ||
4899 | priv->rx_packets++; | |
4900 | ||
4901 | #ifdef CONFIG_IPW_PROMISC | |
4902 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | |
4903 | ipw_handle_data_packet(priv, rxb, &stats); | |
4904 | break; | |
4905 | } | |
4906 | #endif | |
4907 | ||
4908 | header = (struct ieee80211_hdr *)(rxb->skb->data + | |
4909 | IPW_RX_FRAME_SIZE); | |
4910 | /* TODO: Check Ad-Hoc dest/source and make sure | |
4911 | * that we are actually parsing these packets | |
4912 | * correctly -- we should probably use the | |
4913 | * frame control of the packet and disregard | |
4914 | * the current iw_mode */ | |
4915 | switch (priv->ieee->iw_mode) { | |
4916 | case IW_MODE_ADHOC: | |
4917 | network_packet = | |
4918 | !memcmp(header->addr1, | |
4919 | priv->net_dev->dev_addr, | |
4920 | ETH_ALEN) || | |
4921 | !memcmp(header->addr3, | |
4922 | priv->bssid, ETH_ALEN) || | |
4923 | is_broadcast_ether_addr(header->addr1) || | |
4924 | is_multicast_ether_addr(header->addr1); | |
4925 | break; | |
4926 | ||
4927 | case IW_MODE_INFRA: | |
4928 | default: | |
4929 | network_packet = | |
4930 | !memcmp(header->addr3, | |
4931 | priv->bssid, ETH_ALEN) || | |
4932 | !memcmp(header->addr1, | |
4933 | priv->net_dev->dev_addr, | |
4934 | ETH_ALEN) || | |
4935 | is_broadcast_ether_addr(header->addr1) || | |
4936 | is_multicast_ether_addr(header->addr1); | |
4937 | break; | |
4938 | } | |
4939 | ||
4940 | if (network_packet && priv->assoc_network) { | |
4941 | priv->assoc_network->stats.rssi = stats.rssi; | |
4942 | average_add(&priv->average_rssi, | |
4943 | stats.rssi); | |
4944 | priv->last_rx_rssi = stats.rssi; | |
4945 | } | |
4946 | ||
4947 | IPW_DEBUG_RX("Frame: len=%u\n", pkt->u.frame.length); | |
4948 | ||
4949 | if (pkt->u.frame.length < frame_hdr_len(header)) { | |
4950 | IPW_DEBUG_DROP("Received packet is too small. " | |
4951 | "Dropping.\n"); | |
4952 | priv->ieee->stats.rx_errors++; | |
4953 | priv->wstats.discard.misc++; | |
4954 | break; | |
4955 | } | |
4956 | ||
4957 | switch (WLAN_FC_GET_TYPE(header->frame_ctl)) { | |
4958 | case IEEE80211_FTYPE_MGMT: | |
4959 | ieee80211_rx_mgt(priv->ieee, header, &stats); | |
4960 | if (priv->ieee->iw_mode == IW_MODE_ADHOC && | |
4961 | ((WLAN_FC_GET_STYPE(header->frame_ctl) == | |
4962 | IEEE80211_STYPE_PROBE_RESP) || | |
4963 | (WLAN_FC_GET_STYPE(header->frame_ctl) == | |
4964 | IEEE80211_STYPE_BEACON)) && | |
4965 | !memcmp(header->addr3, priv->bssid, ETH_ALEN)) | |
4966 | ipw_add_station(priv, header->addr2); | |
4967 | break; | |
4968 | ||
4969 | case IEEE80211_FTYPE_CTL: | |
4970 | break; | |
4971 | ||
4972 | case IEEE80211_FTYPE_DATA: | |
4973 | if (network_packet) | |
4974 | ipw_handle_data_packet(priv, rxb, &stats); | |
4975 | else | |
4976 | IPW_DEBUG_DROP("Dropping: " MAC_FMT | |
4977 | ", " MAC_FMT ", " MAC_FMT "\n", | |
4978 | MAC_ARG(header->addr1), MAC_ARG(header->addr2), | |
4979 | MAC_ARG(header->addr3)); | |
4980 | break; | |
4981 | } | |
4982 | break; | |
4983 | } | |
4984 | ||
4985 | case RX_HOST_NOTIFICATION_TYPE: { | |
4986 | IPW_DEBUG_RX("Notification: subtype=%02X flags=%02X size=%d\n", | |
4987 | pkt->u.notification.subtype, | |
4988 | pkt->u.notification.flags, | |
4989 | pkt->u.notification.size); | |
4990 | ipw_rx_notification(priv, &pkt->u.notification); | |
4991 | break; | |
4992 | } | |
4993 | ||
4994 | default: | |
4995 | IPW_DEBUG_RX("Bad Rx packet of type %d\n", | |
4996 | pkt->header.message_type); | |
4997 | break; | |
4998 | } | |
4999 | ||
5000 | /* For now we just don't re-use anything. We can tweak this | |
5001 | * later to try and re-use notification packets and SKBs that | |
5002 | * fail to Rx correctly */ | |
5003 | if (rxb->skb != NULL) { | |
5004 | dev_kfree_skb_any(rxb->skb); | |
5005 | rxb->skb = NULL; | |
5006 | } | |
5007 | ||
5008 | pci_unmap_single(priv->pci_dev, rxb->dma_addr, | |
5009 | CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
5010 | list_add_tail(&rxb->list, &priv->rxq->rx_used); | |
5011 | ||
5012 | i = (i + 1) % RX_QUEUE_SIZE; | |
5013 | } | |
5014 | ||
5015 | /* Backtrack one entry */ | |
5016 | priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1; | |
5017 | ||
5018 | ipw_rx_queue_restock(priv); | |
5019 | } | |
5020 | ||
5021 | static void ipw_abort_scan(struct ipw_priv *priv) | |
5022 | { | |
5023 | int err; | |
5024 | ||
5025 | if (priv->status & STATUS_SCAN_ABORTING) { | |
5026 | IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n"); | |
5027 | return; | |
5028 | } | |
5029 | priv->status |= STATUS_SCAN_ABORTING; | |
5030 | ||
5031 | err = ipw_send_scan_abort(priv); | |
5032 | if (err) | |
5033 | IPW_DEBUG_HC("Request to abort scan failed.\n"); | |
5034 | } | |
5035 | ||
5036 | static int ipw_request_scan(struct ipw_priv *priv) | |
5037 | { | |
5038 | struct ipw_scan_request_ext scan; | |
5039 | int channel_index = 0; | |
5040 | int i, err, scan_type; | |
5041 | ||
5042 | if (priv->status & STATUS_EXIT_PENDING) { | |
5043 | IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n"); | |
5044 | priv->status |= STATUS_SCAN_PENDING; | |
5045 | return 0; | |
5046 | } | |
5047 | ||
5048 | if (priv->status & STATUS_SCANNING) { | |
5049 | IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n"); | |
5050 | priv->status |= STATUS_SCAN_PENDING; | |
5051 | ipw_abort_scan(priv); | |
5052 | return 0; | |
5053 | } | |
5054 | ||
5055 | if (priv->status & STATUS_SCAN_ABORTING) { | |
5056 | IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); | |
5057 | priv->status |= STATUS_SCAN_PENDING; | |
5058 | return 0; | |
5059 | } | |
5060 | ||
5061 | if (priv->status & STATUS_RF_KILL_MASK) { | |
5062 | IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n"); | |
5063 | priv->status |= STATUS_SCAN_PENDING; | |
5064 | return 0; | |
5065 | } | |
5066 | ||
5067 | memset(&scan, 0, sizeof(scan)); | |
5068 | ||
5069 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20; | |
5070 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20; | |
5071 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20; | |
5072 | ||
5073 | scan.full_scan_index = ieee80211_get_scans(priv->ieee); | |
5074 | /* If we are roaming, then make this a directed scan for the current | |
5075 | * network. Otherwise, ensure that every other scan is a fast | |
5076 | * channel hop scan */ | |
5077 | if ((priv->status & STATUS_ROAMING) || ( | |
5078 | !(priv->status & STATUS_ASSOCIATED) && | |
5079 | (priv->config & CFG_STATIC_ESSID) && | |
5080 | (scan.full_scan_index % 2))) { | |
5081 | err = ipw_send_ssid(priv, priv->essid, priv->essid_len); | |
5082 | if (err) { | |
5083 | IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); | |
5084 | return err; | |
5085 | } | |
5086 | ||
5087 | scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; | |
5088 | } else { | |
5089 | scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; | |
5090 | } | |
5091 | ||
5092 | if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { | |
5093 | int start = channel_index; | |
5094 | for (i = 0; i < MAX_A_CHANNELS; i++) { | |
5095 | if (band_a_active_channel[i] == 0) | |
5096 | break; | |
5097 | if ((priv->status & STATUS_ASSOCIATED) && | |
5098 | band_a_active_channel[i] == priv->channel) | |
5099 | continue; | |
5100 | channel_index++; | |
5101 | scan.channels_list[channel_index] = | |
5102 | band_a_active_channel[i]; | |
5103 | ipw_set_scan_type(&scan, channel_index, scan_type); | |
5104 | } | |
5105 | ||
5106 | if (start != channel_index) { | |
5107 | scan.channels_list[start] = (u8)(IPW_A_MODE << 6) | | |
5108 | (channel_index - start); | |
5109 | channel_index++; | |
5110 | } | |
5111 | } | |
5112 | ||
5113 | if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) { | |
5114 | int start = channel_index; | |
5115 | for (i = 0; i < MAX_B_CHANNELS; i++) { | |
5116 | if (band_b_active_channel[i] == 0) | |
5117 | break; | |
5118 | if ((priv->status & STATUS_ASSOCIATED) && | |
5119 | band_b_active_channel[i] == priv->channel) | |
5120 | continue; | |
5121 | channel_index++; | |
5122 | scan.channels_list[channel_index] = | |
5123 | band_b_active_channel[i]; | |
5124 | ipw_set_scan_type(&scan, channel_index, scan_type); | |
5125 | } | |
5126 | ||
5127 | if (start != channel_index) { | |
5128 | scan.channels_list[start] = (u8)(IPW_B_MODE << 6) | | |
5129 | (channel_index - start); | |
5130 | } | |
5131 | } | |
5132 | ||
5133 | err = ipw_send_scan_request_ext(priv, &scan); | |
5134 | if (err) { | |
5135 | IPW_DEBUG_HC("Sending scan command failed: %08X\n", | |
5136 | err); | |
5137 | return -EIO; | |
5138 | } | |
5139 | ||
5140 | priv->status |= STATUS_SCANNING; | |
5141 | priv->status &= ~STATUS_SCAN_PENDING; | |
5142 | ||
5143 | return 0; | |
5144 | } | |
5145 | ||
5146 | /* | |
5147 | * This file defines the Wireless Extension handlers. It does not | |
5148 | * define any methods of hardware manipulation and relies on the | |
5149 | * functions defined in ipw_main to provide the HW interaction. | |
5150 | * | |
5151 | * The exception to this is the use of the ipw_get_ordinal() | |
5152 | * function used to poll the hardware vs. making unecessary calls. | |
5153 | * | |
5154 | */ | |
5155 | ||
5156 | static int ipw_wx_get_name(struct net_device *dev, | |
5157 | struct iw_request_info *info, | |
5158 | union iwreq_data *wrqu, char *extra) | |
5159 | { | |
5160 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5161 | if (!(priv->status & STATUS_ASSOCIATED)) | |
5162 | strcpy(wrqu->name, "unassociated"); | |
5163 | else | |
5164 | snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", | |
5165 | ipw_modes[priv->assoc_request.ieee_mode]); | |
5166 | IPW_DEBUG_WX("Name: %s\n", wrqu->name); | |
5167 | return 0; | |
5168 | } | |
5169 | ||
5170 | static int ipw_set_channel(struct ipw_priv *priv, u8 channel) | |
5171 | { | |
5172 | if (channel == 0) { | |
5173 | IPW_DEBUG_INFO("Setting channel to ANY (0)\n"); | |
5174 | priv->config &= ~CFG_STATIC_CHANNEL; | |
5175 | if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED | | |
5176 | STATUS_ASSOCIATING))) { | |
5177 | IPW_DEBUG_ASSOC("Attempting to associate with new " | |
5178 | "parameters.\n"); | |
5179 | ipw_associate(priv); | |
5180 | } | |
5181 | ||
5182 | return 0; | |
5183 | } | |
5184 | ||
5185 | priv->config |= CFG_STATIC_CHANNEL; | |
5186 | ||
5187 | if (priv->channel == channel) { | |
5188 | IPW_DEBUG_INFO( | |
5189 | "Request to set channel to current value (%d)\n", | |
5190 | channel); | |
5191 | return 0; | |
5192 | } | |
5193 | ||
5194 | IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel); | |
5195 | priv->channel = channel; | |
5196 | ||
5197 | /* If we are currently associated, or trying to associate | |
5198 | * then see if this is a new channel (causing us to disassociate) */ | |
5199 | if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | |
5200 | IPW_DEBUG_ASSOC("Disassociating due to channel change.\n"); | |
5201 | ipw_disassociate(priv); | |
5202 | } else { | |
5203 | ipw_associate(priv); | |
5204 | } | |
5205 | ||
5206 | return 0; | |
5207 | } | |
5208 | ||
5209 | static int ipw_wx_set_freq(struct net_device *dev, | |
5210 | struct iw_request_info *info, | |
5211 | union iwreq_data *wrqu, char *extra) | |
5212 | { | |
5213 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5214 | struct iw_freq *fwrq = &wrqu->freq; | |
5215 | ||
5216 | /* if setting by freq convert to channel */ | |
5217 | if (fwrq->e == 1) { | |
5218 | if ((fwrq->m >= (int) 2.412e8 && | |
5219 | fwrq->m <= (int) 2.487e8)) { | |
5220 | int f = fwrq->m / 100000; | |
5221 | int c = 0; | |
5222 | ||
5223 | while ((c < REG_MAX_CHANNEL) && | |
5224 | (f != ipw_frequencies[c])) | |
5225 | c++; | |
5226 | ||
5227 | /* hack to fall through */ | |
5228 | fwrq->e = 0; | |
5229 | fwrq->m = c + 1; | |
5230 | } | |
5231 | } | |
5232 | ||
5233 | if (fwrq->e > 0 || fwrq->m > 1000) | |
5234 | return -EOPNOTSUPP; | |
5235 | ||
5236 | IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); | |
5237 | return ipw_set_channel(priv, (u8)fwrq->m); | |
5238 | ||
5239 | return 0; | |
5240 | } | |
5241 | ||
5242 | ||
5243 | static int ipw_wx_get_freq(struct net_device *dev, | |
5244 | struct iw_request_info *info, | |
5245 | union iwreq_data *wrqu, char *extra) | |
5246 | { | |
5247 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5248 | ||
5249 | wrqu->freq.e = 0; | |
5250 | ||
5251 | /* If we are associated, trying to associate, or have a statically | |
5252 | * configured CHANNEL then return that; otherwise return ANY */ | |
5253 | if (priv->config & CFG_STATIC_CHANNEL || | |
5254 | priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) | |
5255 | wrqu->freq.m = priv->channel; | |
5256 | else | |
5257 | wrqu->freq.m = 0; | |
5258 | ||
5259 | IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); | |
5260 | return 0; | |
5261 | } | |
5262 | ||
5263 | static int ipw_wx_set_mode(struct net_device *dev, | |
5264 | struct iw_request_info *info, | |
5265 | union iwreq_data *wrqu, char *extra) | |
5266 | { | |
5267 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5268 | int err = 0; | |
5269 | ||
5270 | IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode); | |
5271 | ||
5272 | if (wrqu->mode == priv->ieee->iw_mode) | |
5273 | return 0; | |
5274 | ||
5275 | switch (wrqu->mode) { | |
5276 | #ifdef CONFIG_IPW_PROMISC | |
5277 | case IW_MODE_MONITOR: | |
5278 | #endif | |
5279 | case IW_MODE_ADHOC: | |
5280 | case IW_MODE_INFRA: | |
5281 | break; | |
5282 | case IW_MODE_AUTO: | |
5283 | wrqu->mode = IW_MODE_INFRA; | |
5284 | break; | |
5285 | default: | |
5286 | return -EINVAL; | |
5287 | } | |
5288 | ||
5289 | #ifdef CONFIG_IPW_PROMISC | |
5290 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) | |
5291 | priv->net_dev->type = ARPHRD_ETHER; | |
5292 | ||
5293 | if (wrqu->mode == IW_MODE_MONITOR) | |
5294 | priv->net_dev->type = ARPHRD_IEEE80211; | |
5295 | #endif /* CONFIG_IPW_PROMISC */ | |
5296 | ||
5297 | #ifdef CONFIG_PM | |
5298 | /* Free the existing firmware and reset the fw_loaded | |
5299 | * flag so ipw_load() will bring in the new firmawre */ | |
5300 | if (fw_loaded) { | |
5301 | fw_loaded = 0; | |
5302 | } | |
5303 | ||
5304 | release_firmware(bootfw); | |
5305 | release_firmware(ucode); | |
5306 | release_firmware(firmware); | |
5307 | bootfw = ucode = firmware = NULL; | |
5308 | #endif | |
5309 | ||
5310 | priv->ieee->iw_mode = wrqu->mode; | |
5311 | ipw_adapter_restart(priv); | |
5312 | ||
5313 | return err; | |
5314 | } | |
5315 | ||
5316 | static int ipw_wx_get_mode(struct net_device *dev, | |
5317 | struct iw_request_info *info, | |
5318 | union iwreq_data *wrqu, char *extra) | |
5319 | { | |
5320 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5321 | ||
5322 | wrqu->mode = priv->ieee->iw_mode; | |
5323 | IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); | |
5324 | ||
5325 | return 0; | |
5326 | } | |
5327 | ||
5328 | ||
5329 | #define DEFAULT_RTS_THRESHOLD 2304U | |
5330 | #define MIN_RTS_THRESHOLD 1U | |
5331 | #define MAX_RTS_THRESHOLD 2304U | |
5332 | #define DEFAULT_BEACON_INTERVAL 100U | |
5333 | #define DEFAULT_SHORT_RETRY_LIMIT 7U | |
5334 | #define DEFAULT_LONG_RETRY_LIMIT 4U | |
5335 | ||
5336 | /* Values are in microsecond */ | |
5337 | static const s32 timeout_duration[] = { | |
5338 | 350000, | |
5339 | 250000, | |
5340 | 75000, | |
5341 | 37000, | |
5342 | 25000, | |
5343 | }; | |
5344 | ||
5345 | static const s32 period_duration[] = { | |
5346 | 400000, | |
5347 | 700000, | |
5348 | 1000000, | |
5349 | 1000000, | |
5350 | 1000000 | |
5351 | }; | |
5352 | ||
5353 | static int ipw_wx_get_range(struct net_device *dev, | |
5354 | struct iw_request_info *info, | |
5355 | union iwreq_data *wrqu, char *extra) | |
5356 | { | |
5357 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5358 | struct iw_range *range = (struct iw_range *)extra; | |
5359 | u16 val; | |
5360 | int i; | |
5361 | ||
5362 | wrqu->data.length = sizeof(*range); | |
5363 | memset(range, 0, sizeof(*range)); | |
5364 | ||
5365 | /* 54Mbs == ~27 Mb/s real (802.11g) */ | |
5366 | range->throughput = 27 * 1000 * 1000; | |
5367 | ||
5368 | range->max_qual.qual = 100; | |
5369 | /* TODO: Find real max RSSI and stick here */ | |
5370 | range->max_qual.level = 0; | |
5371 | range->max_qual.noise = 0; | |
5372 | range->max_qual.updated = 7; /* Updated all three */ | |
5373 | ||
5374 | range->avg_qual.qual = 70; | |
5375 | /* TODO: Find real 'good' to 'bad' threshol value for RSSI */ | |
5376 | range->avg_qual.level = 0; /* FIXME to real average level */ | |
5377 | range->avg_qual.noise = 0; | |
5378 | range->avg_qual.updated = 7; /* Updated all three */ | |
5379 | ||
5380 | range->num_bitrates = min(priv->rates.num_rates, (u8)IW_MAX_BITRATES); | |
5381 | ||
5382 | for (i = 0; i < range->num_bitrates; i++) | |
5383 | range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * | |
5384 | 500000; | |
5385 | ||
5386 | range->max_rts = DEFAULT_RTS_THRESHOLD; | |
5387 | range->min_frag = MIN_FRAG_THRESHOLD; | |
5388 | range->max_frag = MAX_FRAG_THRESHOLD; | |
5389 | ||
5390 | range->encoding_size[0] = 5; | |
5391 | range->encoding_size[1] = 13; | |
5392 | range->num_encoding_sizes = 2; | |
5393 | range->max_encoding_tokens = WEP_KEYS; | |
5394 | ||
5395 | /* Set the Wireless Extension versions */ | |
5396 | range->we_version_compiled = WIRELESS_EXT; | |
5397 | range->we_version_source = 16; | |
5398 | ||
5399 | range->num_channels = FREQ_COUNT; | |
5400 | ||
5401 | val = 0; | |
5402 | for (i = 0; i < FREQ_COUNT; i++) { | |
5403 | range->freq[val].i = i + 1; | |
5404 | range->freq[val].m = ipw_frequencies[i] * 100000; | |
5405 | range->freq[val].e = 1; | |
5406 | val++; | |
5407 | ||
5408 | if (val == IW_MAX_FREQUENCIES) | |
5409 | break; | |
5410 | } | |
5411 | range->num_frequency = val; | |
5412 | ||
5413 | IPW_DEBUG_WX("GET Range\n"); | |
5414 | return 0; | |
5415 | } | |
5416 | ||
5417 | static int ipw_wx_set_wap(struct net_device *dev, | |
5418 | struct iw_request_info *info, | |
5419 | union iwreq_data *wrqu, char *extra) | |
5420 | { | |
5421 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5422 | ||
5423 | static const unsigned char any[] = { | |
5424 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | |
5425 | }; | |
5426 | static const unsigned char off[] = { | |
5427 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 | |
5428 | }; | |
5429 | ||
5430 | if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) | |
5431 | return -EINVAL; | |
5432 | ||
5433 | if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || | |
5434 | !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { | |
5435 | /* we disable mandatory BSSID association */ | |
5436 | IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); | |
5437 | priv->config &= ~CFG_STATIC_BSSID; | |
5438 | if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED | | |
5439 | STATUS_ASSOCIATING))) { | |
5440 | IPW_DEBUG_ASSOC("Attempting to associate with new " | |
5441 | "parameters.\n"); | |
5442 | ipw_associate(priv); | |
5443 | } | |
5444 | ||
5445 | return 0; | |
5446 | } | |
5447 | ||
5448 | priv->config |= CFG_STATIC_BSSID; | |
5449 | if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { | |
5450 | IPW_DEBUG_WX("BSSID set to current BSSID.\n"); | |
5451 | return 0; | |
5452 | } | |
5453 | ||
5454 | IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n", | |
5455 | MAC_ARG(wrqu->ap_addr.sa_data)); | |
5456 | ||
5457 | memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); | |
5458 | ||
5459 | /* If we are currently associated, or trying to associate | |
5460 | * then see if this is a new BSSID (causing us to disassociate) */ | |
5461 | if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | |
5462 | IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n"); | |
5463 | ipw_disassociate(priv); | |
5464 | } else { | |
5465 | ipw_associate(priv); | |
5466 | } | |
5467 | ||
5468 | return 0; | |
5469 | } | |
5470 | ||
5471 | static int ipw_wx_get_wap(struct net_device *dev, | |
5472 | struct iw_request_info *info, | |
5473 | union iwreq_data *wrqu, char *extra) | |
5474 | { | |
5475 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5476 | /* If we are associated, trying to associate, or have a statically | |
5477 | * configured BSSID then return that; otherwise return ANY */ | |
5478 | if (priv->config & CFG_STATIC_BSSID || | |
5479 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | |
5480 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; | |
5481 | memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN); | |
5482 | } else | |
5483 | memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); | |
5484 | ||
5485 | IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", | |
5486 | MAC_ARG(wrqu->ap_addr.sa_data)); | |
5487 | return 0; | |
5488 | } | |
5489 | ||
5490 | static int ipw_wx_set_essid(struct net_device *dev, | |
5491 | struct iw_request_info *info, | |
5492 | union iwreq_data *wrqu, char *extra) | |
5493 | { | |
5494 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5495 | char *essid = ""; /* ANY */ | |
5496 | int length = 0; | |
5497 | ||
5498 | if (wrqu->essid.flags && wrqu->essid.length) { | |
5499 | length = wrqu->essid.length - 1; | |
5500 | essid = extra; | |
5501 | } | |
5502 | if (length == 0) { | |
5503 | IPW_DEBUG_WX("Setting ESSID to ANY\n"); | |
5504 | priv->config &= ~CFG_STATIC_ESSID; | |
5505 | if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED | | |
5506 | STATUS_ASSOCIATING))) { | |
5507 | IPW_DEBUG_ASSOC("Attempting to associate with new " | |
5508 | "parameters.\n"); | |
5509 | ipw_associate(priv); | |
5510 | } | |
5511 | ||
5512 | return 0; | |
5513 | } | |
5514 | ||
5515 | length = min(length, IW_ESSID_MAX_SIZE); | |
5516 | ||
5517 | priv->config |= CFG_STATIC_ESSID; | |
5518 | ||
5519 | if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { | |
5520 | IPW_DEBUG_WX("ESSID set to current ESSID.\n"); | |
5521 | return 0; | |
5522 | } | |
5523 | ||
5524 | IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length), | |
5525 | length); | |
5526 | ||
5527 | priv->essid_len = length; | |
5528 | memcpy(priv->essid, essid, priv->essid_len); | |
5529 | ||
5530 | /* If we are currently associated, or trying to associate | |
5531 | * then see if this is a new ESSID (causing us to disassociate) */ | |
5532 | if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | |
5533 | IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n"); | |
5534 | ipw_disassociate(priv); | |
5535 | } else { | |
5536 | ipw_associate(priv); | |
5537 | } | |
5538 | ||
5539 | return 0; | |
5540 | } | |
5541 | ||
5542 | static int ipw_wx_get_essid(struct net_device *dev, | |
5543 | struct iw_request_info *info, | |
5544 | union iwreq_data *wrqu, char *extra) | |
5545 | { | |
5546 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5547 | ||
5548 | /* If we are associated, trying to associate, or have a statically | |
5549 | * configured ESSID then return that; otherwise return ANY */ | |
5550 | if (priv->config & CFG_STATIC_ESSID || | |
5551 | priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | |
5552 | IPW_DEBUG_WX("Getting essid: '%s'\n", | |
5553 | escape_essid(priv->essid, priv->essid_len)); | |
5554 | memcpy(extra, priv->essid, priv->essid_len); | |
5555 | wrqu->essid.length = priv->essid_len; | |
5556 | wrqu->essid.flags = 1; /* active */ | |
5557 | } else { | |
5558 | IPW_DEBUG_WX("Getting essid: ANY\n"); | |
5559 | wrqu->essid.length = 0; | |
5560 | wrqu->essid.flags = 0; /* active */ | |
5561 | } | |
5562 | ||
5563 | return 0; | |
5564 | } | |
5565 | ||
5566 | static int ipw_wx_set_nick(struct net_device *dev, | |
5567 | struct iw_request_info *info, | |
5568 | union iwreq_data *wrqu, char *extra) | |
5569 | { | |
5570 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5571 | ||
5572 | IPW_DEBUG_WX("Setting nick to '%s'\n", extra); | |
5573 | if (wrqu->data.length > IW_ESSID_MAX_SIZE) | |
5574 | return -E2BIG; | |
5575 | ||
5576 | wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick)); | |
5577 | memset(priv->nick, 0, sizeof(priv->nick)); | |
5578 | memcpy(priv->nick, extra, wrqu->data.length); | |
5579 | IPW_DEBUG_TRACE("<<\n"); | |
5580 | return 0; | |
5581 | ||
5582 | } | |
5583 | ||
5584 | ||
5585 | static int ipw_wx_get_nick(struct net_device *dev, | |
5586 | struct iw_request_info *info, | |
5587 | union iwreq_data *wrqu, char *extra) | |
5588 | { | |
5589 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5590 | IPW_DEBUG_WX("Getting nick\n"); | |
5591 | wrqu->data.length = strlen(priv->nick) + 1; | |
5592 | memcpy(extra, priv->nick, wrqu->data.length); | |
5593 | wrqu->data.flags = 1; /* active */ | |
5594 | return 0; | |
5595 | } | |
5596 | ||
5597 | ||
5598 | static int ipw_wx_set_rate(struct net_device *dev, | |
5599 | struct iw_request_info *info, | |
5600 | union iwreq_data *wrqu, char *extra) | |
5601 | { | |
5602 | IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu); | |
5603 | return -EOPNOTSUPP; | |
5604 | } | |
5605 | ||
5606 | static int ipw_wx_get_rate(struct net_device *dev, | |
5607 | struct iw_request_info *info, | |
5608 | union iwreq_data *wrqu, char *extra) | |
5609 | { | |
5610 | struct ipw_priv * priv = ieee80211_priv(dev); | |
5611 | wrqu->bitrate.value = priv->last_rate; | |
5612 | ||
5613 | IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); | |
5614 | return 0; | |
5615 | } | |
5616 | ||
5617 | ||
5618 | static int ipw_wx_set_rts(struct net_device *dev, | |
5619 | struct iw_request_info *info, | |
5620 | union iwreq_data *wrqu, char *extra) | |
5621 | { | |
5622 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5623 | ||
5624 | if (wrqu->rts.disabled) | |
5625 | priv->rts_threshold = DEFAULT_RTS_THRESHOLD; | |
5626 | else { | |
5627 | if (wrqu->rts.value < MIN_RTS_THRESHOLD || | |
5628 | wrqu->rts.value > MAX_RTS_THRESHOLD) | |
5629 | return -EINVAL; | |
5630 | ||
5631 | priv->rts_threshold = wrqu->rts.value; | |
5632 | } | |
5633 | ||
5634 | ipw_send_rts_threshold(priv, priv->rts_threshold); | |
5635 | IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); | |
5636 | return 0; | |
5637 | } | |
5638 | ||
5639 | static int ipw_wx_get_rts(struct net_device *dev, | |
5640 | struct iw_request_info *info, | |
5641 | union iwreq_data *wrqu, char *extra) | |
5642 | { | |
5643 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5644 | wrqu->rts.value = priv->rts_threshold; | |
5645 | wrqu->rts.fixed = 0; /* no auto select */ | |
5646 | wrqu->rts.disabled = | |
5647 | (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); | |
5648 | ||
5649 | IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); | |
5650 | return 0; | |
5651 | } | |
5652 | ||
5653 | ||
5654 | static int ipw_wx_set_txpow(struct net_device *dev, | |
5655 | struct iw_request_info *info, | |
5656 | union iwreq_data *wrqu, char *extra) | |
5657 | { | |
5658 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5659 | struct ipw_tx_power tx_power; | |
5660 | int i; | |
5661 | ||
5662 | if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) | |
5663 | return -EINPROGRESS; | |
5664 | ||
5665 | if (wrqu->power.flags != IW_TXPOW_DBM) | |
5666 | return -EINVAL; | |
5667 | ||
5668 | if ((wrqu->power.value > 20) || | |
5669 | (wrqu->power.value < -12)) | |
5670 | return -EINVAL; | |
5671 | ||
5672 | priv->tx_power = wrqu->power.value; | |
5673 | ||
5674 | memset(&tx_power, 0, sizeof(tx_power)); | |
5675 | ||
5676 | /* configure device for 'G' band */ | |
5677 | tx_power.ieee_mode = IPW_G_MODE; | |
5678 | tx_power.num_channels = 11; | |
5679 | for (i = 0; i < 11; i++) { | |
5680 | tx_power.channels_tx_power[i].channel_number = i + 1; | |
5681 | tx_power.channels_tx_power[i].tx_power = priv->tx_power; | |
5682 | } | |
5683 | if (ipw_send_tx_power(priv, &tx_power)) | |
5684 | goto error; | |
5685 | ||
5686 | /* configure device to also handle 'B' band */ | |
5687 | tx_power.ieee_mode = IPW_B_MODE; | |
5688 | if (ipw_send_tx_power(priv, &tx_power)) | |
5689 | goto error; | |
5690 | ||
5691 | return 0; | |
5692 | ||
5693 | error: | |
5694 | return -EIO; | |
5695 | } | |
5696 | ||
5697 | ||
5698 | static int ipw_wx_get_txpow(struct net_device *dev, | |
5699 | struct iw_request_info *info, | |
5700 | union iwreq_data *wrqu, char *extra) | |
5701 | { | |
5702 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5703 | ||
5704 | wrqu->power.value = priv->tx_power; | |
5705 | wrqu->power.fixed = 1; | |
5706 | wrqu->power.flags = IW_TXPOW_DBM; | |
5707 | wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; | |
5708 | ||
5709 | IPW_DEBUG_WX("GET TX Power -> %s %d \n", | |
5710 | wrqu->power.disabled ? "ON" : "OFF", | |
5711 | wrqu->power.value); | |
5712 | ||
5713 | return 0; | |
5714 | } | |
5715 | ||
5716 | static int ipw_wx_set_frag(struct net_device *dev, | |
5717 | struct iw_request_info *info, | |
5718 | union iwreq_data *wrqu, char *extra) | |
5719 | { | |
5720 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5721 | ||
5722 | if (wrqu->frag.disabled) | |
5723 | priv->ieee->fts = DEFAULT_FTS; | |
5724 | else { | |
5725 | if (wrqu->frag.value < MIN_FRAG_THRESHOLD || | |
5726 | wrqu->frag.value > MAX_FRAG_THRESHOLD) | |
5727 | return -EINVAL; | |
5728 | ||
5729 | priv->ieee->fts = wrqu->frag.value & ~0x1; | |
5730 | } | |
5731 | ||
5732 | ipw_send_frag_threshold(priv, wrqu->frag.value); | |
5733 | IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); | |
5734 | return 0; | |
5735 | } | |
5736 | ||
5737 | static int ipw_wx_get_frag(struct net_device *dev, | |
5738 | struct iw_request_info *info, | |
5739 | union iwreq_data *wrqu, char *extra) | |
5740 | { | |
5741 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5742 | wrqu->frag.value = priv->ieee->fts; | |
5743 | wrqu->frag.fixed = 0; /* no auto select */ | |
5744 | wrqu->frag.disabled = | |
5745 | (wrqu->frag.value == DEFAULT_FTS); | |
5746 | ||
5747 | IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); | |
5748 | ||
5749 | return 0; | |
5750 | } | |
5751 | ||
5752 | static int ipw_wx_set_retry(struct net_device *dev, | |
5753 | struct iw_request_info *info, | |
5754 | union iwreq_data *wrqu, char *extra) | |
5755 | { | |
5756 | IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu); | |
5757 | return -EOPNOTSUPP; | |
5758 | } | |
5759 | ||
5760 | ||
5761 | static int ipw_wx_get_retry(struct net_device *dev, | |
5762 | struct iw_request_info *info, | |
5763 | union iwreq_data *wrqu, char *extra) | |
5764 | { | |
5765 | IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu); | |
5766 | return -EOPNOTSUPP; | |
5767 | } | |
5768 | ||
5769 | ||
5770 | static int ipw_wx_set_scan(struct net_device *dev, | |
5771 | struct iw_request_info *info, | |
5772 | union iwreq_data *wrqu, char *extra) | |
5773 | { | |
5774 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5775 | IPW_DEBUG_WX("Start scan\n"); | |
5776 | if (ipw_request_scan(priv)) | |
5777 | return -EIO; | |
5778 | return 0; | |
5779 | } | |
5780 | ||
5781 | static int ipw_wx_get_scan(struct net_device *dev, | |
5782 | struct iw_request_info *info, | |
5783 | union iwreq_data *wrqu, char *extra) | |
5784 | { | |
5785 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5786 | return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra); | |
5787 | } | |
5788 | ||
5789 | static int ipw_wx_set_encode(struct net_device *dev, | |
5790 | struct iw_request_info *info, | |
5791 | union iwreq_data *wrqu, char *key) | |
5792 | { | |
5793 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5794 | return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); | |
5795 | } | |
5796 | ||
5797 | static int ipw_wx_get_encode(struct net_device *dev, | |
5798 | struct iw_request_info *info, | |
5799 | union iwreq_data *wrqu, char *key) | |
5800 | { | |
5801 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5802 | return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key); | |
5803 | } | |
5804 | ||
5805 | static int ipw_wx_set_power(struct net_device *dev, | |
5806 | struct iw_request_info *info, | |
5807 | union iwreq_data *wrqu, char *extra) | |
5808 | { | |
5809 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5810 | int err; | |
5811 | ||
5812 | if (wrqu->power.disabled) { | |
5813 | priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); | |
5814 | err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); | |
5815 | if (err) { | |
5816 | IPW_DEBUG_WX("failed setting power mode.\n"); | |
5817 | return err; | |
5818 | } | |
5819 | ||
5820 | IPW_DEBUG_WX("SET Power Management Mode -> off\n"); | |
5821 | ||
5822 | return 0; | |
5823 | } | |
5824 | ||
5825 | switch (wrqu->power.flags & IW_POWER_MODE) { | |
5826 | case IW_POWER_ON: /* If not specified */ | |
5827 | case IW_POWER_MODE: /* If set all mask */ | |
5828 | case IW_POWER_ALL_R: /* If explicitely state all */ | |
5829 | break; | |
5830 | default: /* Otherwise we don't support it */ | |
5831 | IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", | |
5832 | wrqu->power.flags); | |
5833 | return -EOPNOTSUPP; | |
5834 | } | |
5835 | ||
5836 | /* If the user hasn't specified a power management mode yet, default | |
5837 | * to BATTERY */ | |
5838 | if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) | |
5839 | priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; | |
5840 | else | |
5841 | priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; | |
5842 | err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); | |
5843 | if (err) { | |
5844 | IPW_DEBUG_WX("failed setting power mode.\n"); | |
5845 | return err; | |
5846 | } | |
5847 | ||
5848 | IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", | |
5849 | priv->power_mode); | |
5850 | ||
5851 | return 0; | |
5852 | } | |
5853 | ||
5854 | static int ipw_wx_get_power(struct net_device *dev, | |
5855 | struct iw_request_info *info, | |
5856 | union iwreq_data *wrqu, char *extra) | |
5857 | { | |
5858 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5859 | ||
5860 | if (!(priv->power_mode & IPW_POWER_ENABLED)) { | |
5861 | wrqu->power.disabled = 1; | |
5862 | } else { | |
5863 | wrqu->power.disabled = 0; | |
5864 | } | |
5865 | ||
5866 | IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); | |
5867 | ||
5868 | return 0; | |
5869 | } | |
5870 | ||
5871 | static int ipw_wx_set_powermode(struct net_device *dev, | |
5872 | struct iw_request_info *info, | |
5873 | union iwreq_data *wrqu, char *extra) | |
5874 | { | |
5875 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5876 | int mode = *(int *)extra; | |
5877 | int err; | |
5878 | ||
5879 | if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { | |
5880 | mode = IPW_POWER_AC; | |
5881 | priv->power_mode = mode; | |
5882 | } else { | |
5883 | priv->power_mode = IPW_POWER_ENABLED | mode; | |
5884 | } | |
5885 | ||
5886 | if (priv->power_mode != mode) { | |
5887 | err = ipw_send_power_mode(priv, mode); | |
5888 | ||
5889 | if (err) { | |
5890 | IPW_DEBUG_WX("failed setting power mode.\n"); | |
5891 | return err; | |
5892 | } | |
5893 | } | |
5894 | ||
5895 | return 0; | |
5896 | } | |
5897 | ||
5898 | #define MAX_WX_STRING 80 | |
5899 | static int ipw_wx_get_powermode(struct net_device *dev, | |
5900 | struct iw_request_info *info, | |
5901 | union iwreq_data *wrqu, char *extra) | |
5902 | { | |
5903 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5904 | int level = IPW_POWER_LEVEL(priv->power_mode); | |
5905 | char *p = extra; | |
5906 | ||
5907 | p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level); | |
5908 | ||
5909 | switch (level) { | |
5910 | case IPW_POWER_AC: | |
5911 | p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)"); | |
5912 | break; | |
5913 | case IPW_POWER_BATTERY: | |
5914 | p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)"); | |
5915 | break; | |
5916 | default: | |
5917 | p += snprintf(p, MAX_WX_STRING - (p - extra), | |
5918 | "(Timeout %dms, Period %dms)", | |
5919 | timeout_duration[level - 1] / 1000, | |
5920 | period_duration[level - 1] / 1000); | |
5921 | } | |
5922 | ||
5923 | if (!(priv->power_mode & IPW_POWER_ENABLED)) | |
5924 | p += snprintf(p, MAX_WX_STRING - (p - extra)," OFF"); | |
5925 | ||
5926 | wrqu->data.length = p - extra + 1; | |
5927 | ||
5928 | return 0; | |
5929 | } | |
5930 | ||
5931 | static int ipw_wx_set_wireless_mode(struct net_device *dev, | |
5932 | struct iw_request_info *info, | |
5933 | union iwreq_data *wrqu, char *extra) | |
5934 | { | |
5935 | struct ipw_priv *priv = ieee80211_priv(dev); | |
5936 | int mode = *(int *)extra; | |
5937 | u8 band = 0, modulation = 0; | |
5938 | ||
5939 | if (mode == 0 || mode & ~IEEE_MODE_MASK) { | |
5940 | IPW_WARNING("Attempt to set invalid wireless mode: %d\n", | |
5941 | mode); | |
5942 | return -EINVAL; | |
5943 | } | |
5944 | ||
5945 | if (priv->adapter == IPW_2915ABG) { | |
5946 | priv->ieee->abg_ture = 1; | |
5947 | if (mode & IEEE_A) { | |
5948 | band |= IEEE80211_52GHZ_BAND; | |
5949 | modulation |= IEEE80211_OFDM_MODULATION; | |
5950 | } else | |
5951 | priv->ieee->abg_ture = 0; | |
5952 | } else { | |
5953 | if (mode & IEEE_A) { | |
5954 | IPW_WARNING("Attempt to set 2200BG into " | |
5955 | "802.11a mode\n"); | |
5956 | return -EINVAL; | |
5957 | } | |
5958 | ||
5959 | priv->ieee->abg_ture = 0; | |
5960 | } | |
5961 | ||
5962 | if (mode & IEEE_B) { | |
5963 | band |= IEEE80211_24GHZ_BAND; | |
5964 | modulation |= IEEE80211_CCK_MODULATION; | |
5965 | } else | |
5966 | priv->ieee->abg_ture = 0; | |
5967 | ||
5968 | if (mode & IEEE_G) { | |
5969 | band |= IEEE80211_24GHZ_BAND; | |
5970 | modulation |= IEEE80211_OFDM_MODULATION; | |
5971 | } else | |
5972 | priv->ieee->abg_ture = 0; | |
5973 | ||
5974 | priv->ieee->mode = mode; | |
5975 | priv->ieee->freq_band = band; | |
5976 | priv->ieee->modulation = modulation; | |
5977 | init_supported_rates(priv, &priv->rates); | |
5978 | ||
5979 | /* If we are currently associated, or trying to associate | |
5980 | * then see if this is a new configuration (causing us to | |
5981 | * disassociate) */ | |
5982 | if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { | |
5983 | /* The resulting association will trigger | |
5984 | * the new rates to be sent to the device */ | |
5985 | IPW_DEBUG_ASSOC("Disassociating due to mode change.\n"); | |
5986 | ipw_disassociate(priv); | |
5987 | } else | |
5988 | ipw_send_supported_rates(priv, &priv->rates); | |
5989 | ||
5990 | IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", | |
5991 | mode & IEEE_A ? 'a' : '.', | |
5992 | mode & IEEE_B ? 'b' : '.', | |
5993 | mode & IEEE_G ? 'g' : '.'); | |
5994 | return 0; | |
5995 | } | |
5996 | ||
5997 | static int ipw_wx_get_wireless_mode(struct net_device *dev, | |
5998 | struct iw_request_info *info, | |
5999 | union iwreq_data *wrqu, char *extra) | |
6000 | { | |
6001 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6002 | ||
6003 | switch (priv->ieee->freq_band) { | |
6004 | case IEEE80211_24GHZ_BAND: | |
6005 | switch (priv->ieee->modulation) { | |
6006 | case IEEE80211_CCK_MODULATION: | |
6007 | strncpy(extra, "802.11b (2)", MAX_WX_STRING); | |
6008 | break; | |
6009 | case IEEE80211_OFDM_MODULATION: | |
6010 | strncpy(extra, "802.11g (4)", MAX_WX_STRING); | |
6011 | break; | |
6012 | default: | |
6013 | strncpy(extra, "802.11bg (6)", MAX_WX_STRING); | |
6014 | break; | |
6015 | } | |
6016 | break; | |
6017 | ||
6018 | case IEEE80211_52GHZ_BAND: | |
6019 | strncpy(extra, "802.11a (1)", MAX_WX_STRING); | |
6020 | break; | |
6021 | ||
6022 | default: /* Mixed Band */ | |
6023 | switch (priv->ieee->modulation) { | |
6024 | case IEEE80211_CCK_MODULATION: | |
6025 | strncpy(extra, "802.11ab (3)", MAX_WX_STRING); | |
6026 | break; | |
6027 | case IEEE80211_OFDM_MODULATION: | |
6028 | strncpy(extra, "802.11ag (5)", MAX_WX_STRING); | |
6029 | break; | |
6030 | default: | |
6031 | strncpy(extra, "802.11abg (7)", MAX_WX_STRING); | |
6032 | break; | |
6033 | } | |
6034 | break; | |
6035 | } | |
6036 | ||
6037 | IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); | |
6038 | ||
6039 | wrqu->data.length = strlen(extra) + 1; | |
6040 | ||
6041 | return 0; | |
6042 | } | |
6043 | ||
6044 | #ifdef CONFIG_IPW_PROMISC | |
6045 | static int ipw_wx_set_promisc(struct net_device *dev, | |
6046 | struct iw_request_info *info, | |
6047 | union iwreq_data *wrqu, char *extra) | |
6048 | { | |
6049 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6050 | int *parms = (int *)extra; | |
6051 | int enable = (parms[0] > 0); | |
6052 | ||
6053 | IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]); | |
6054 | if (enable) { | |
6055 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | |
6056 | priv->net_dev->type = ARPHRD_IEEE80211; | |
6057 | ipw_adapter_restart(priv); | |
6058 | } | |
6059 | ||
6060 | ipw_set_channel(priv, parms[1]); | |
6061 | } else { | |
6062 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) | |
6063 | return 0; | |
6064 | priv->net_dev->type = ARPHRD_ETHER; | |
6065 | ipw_adapter_restart(priv); | |
6066 | } | |
6067 | return 0; | |
6068 | } | |
6069 | ||
6070 | ||
6071 | static int ipw_wx_reset(struct net_device *dev, | |
6072 | struct iw_request_info *info, | |
6073 | union iwreq_data *wrqu, char *extra) | |
6074 | { | |
6075 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6076 | IPW_DEBUG_WX("RESET\n"); | |
6077 | ipw_adapter_restart(priv); | |
6078 | return 0; | |
6079 | } | |
6080 | #endif // CONFIG_IPW_PROMISC | |
6081 | ||
6082 | /* Rebase the WE IOCTLs to zero for the handler array */ | |
6083 | #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] | |
6084 | static iw_handler ipw_wx_handlers[] = | |
6085 | { | |
6086 | IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name, | |
6087 | IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, | |
6088 | IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, | |
6089 | IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, | |
6090 | IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, | |
6091 | IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, | |
6092 | IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, | |
6093 | IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, | |
6094 | IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, | |
6095 | IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, | |
6096 | IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, | |
6097 | IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, | |
6098 | IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, | |
6099 | IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, | |
6100 | IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, | |
6101 | IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, | |
6102 | IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, | |
6103 | IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, | |
6104 | IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, | |
6105 | IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, | |
6106 | IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, | |
6107 | IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, | |
6108 | IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, | |
6109 | IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, | |
6110 | IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, | |
6111 | IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, | |
6112 | IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, | |
6113 | IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power, | |
6114 | }; | |
6115 | ||
6116 | #define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV | |
6117 | #define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1 | |
6118 | #define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2 | |
6119 | #define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3 | |
6120 | #define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4 | |
6121 | #define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5 | |
6122 | ||
6123 | ||
6124 | static struct iw_priv_args ipw_priv_args[] = { | |
6125 | { | |
6126 | .cmd = IPW_PRIV_SET_POWER, | |
6127 | .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, | |
6128 | .name = "set_power" | |
6129 | }, | |
6130 | { | |
6131 | .cmd = IPW_PRIV_GET_POWER, | |
6132 | .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, | |
6133 | .name = "get_power" | |
6134 | }, | |
6135 | { | |
6136 | .cmd = IPW_PRIV_SET_MODE, | |
6137 | .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, | |
6138 | .name = "set_mode" | |
6139 | }, | |
6140 | { | |
6141 | .cmd = IPW_PRIV_GET_MODE, | |
6142 | .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, | |
6143 | .name = "get_mode" | |
6144 | }, | |
6145 | #ifdef CONFIG_IPW_PROMISC | |
6146 | { | |
6147 | IPW_PRIV_SET_PROMISC, | |
6148 | IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor" | |
6149 | }, | |
6150 | { | |
6151 | IPW_PRIV_RESET, | |
6152 | IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset" | |
6153 | }, | |
6154 | #endif /* CONFIG_IPW_PROMISC */ | |
6155 | }; | |
6156 | ||
6157 | static iw_handler ipw_priv_handler[] = { | |
6158 | ipw_wx_set_powermode, | |
6159 | ipw_wx_get_powermode, | |
6160 | ipw_wx_set_wireless_mode, | |
6161 | ipw_wx_get_wireless_mode, | |
6162 | #ifdef CONFIG_IPW_PROMISC | |
6163 | ipw_wx_set_promisc, | |
6164 | ipw_wx_reset, | |
6165 | #endif | |
6166 | }; | |
6167 | ||
6168 | static struct iw_handler_def ipw_wx_handler_def = | |
6169 | { | |
6170 | .standard = ipw_wx_handlers, | |
6171 | .num_standard = ARRAY_SIZE(ipw_wx_handlers), | |
6172 | .num_private = ARRAY_SIZE(ipw_priv_handler), | |
6173 | .num_private_args = ARRAY_SIZE(ipw_priv_args), | |
6174 | .private = ipw_priv_handler, | |
6175 | .private_args = ipw_priv_args, | |
6176 | }; | |
6177 | ||
6178 | ||
6179 | ||
6180 | ||
6181 | /* | |
6182 | * Get wireless statistics. | |
6183 | * Called by /proc/net/wireless | |
6184 | * Also called by SIOCGIWSTATS | |
6185 | */ | |
6186 | static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev) | |
6187 | { | |
6188 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6189 | struct iw_statistics *wstats; | |
6190 | ||
6191 | wstats = &priv->wstats; | |
6192 | ||
6193 | /* if hw is disabled, then ipw2100_get_ordinal() can't be called. | |
6194 | * ipw2100_wx_wireless_stats seems to be called before fw is | |
6195 | * initialized. STATUS_ASSOCIATED will only be set if the hw is up | |
6196 | * and associated; if not associcated, the values are all meaningless | |
6197 | * anyway, so set them all to NULL and INVALID */ | |
6198 | if (!(priv->status & STATUS_ASSOCIATED)) { | |
6199 | wstats->miss.beacon = 0; | |
6200 | wstats->discard.retries = 0; | |
6201 | wstats->qual.qual = 0; | |
6202 | wstats->qual.level = 0; | |
6203 | wstats->qual.noise = 0; | |
6204 | wstats->qual.updated = 7; | |
6205 | wstats->qual.updated |= IW_QUAL_NOISE_INVALID | | |
6206 | IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; | |
6207 | return wstats; | |
6208 | } | |
6209 | ||
6210 | wstats->qual.qual = priv->quality; | |
6211 | wstats->qual.level = average_value(&priv->average_rssi); | |
6212 | wstats->qual.noise = average_value(&priv->average_noise); | |
6213 | wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | | |
6214 | IW_QUAL_NOISE_UPDATED; | |
6215 | ||
6216 | wstats->miss.beacon = average_value(&priv->average_missed_beacons); | |
6217 | wstats->discard.retries = priv->last_tx_failures; | |
6218 | wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable; | |
6219 | ||
6220 | /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len)) | |
6221 | goto fail_get_ordinal; | |
6222 | wstats->discard.retries += tx_retry; */ | |
6223 | ||
6224 | return wstats; | |
6225 | } | |
6226 | ||
6227 | ||
6228 | /* net device stuff */ | |
6229 | ||
6230 | static inline void init_sys_config(struct ipw_sys_config *sys_config) | |
6231 | { | |
6232 | memset(sys_config, 0, sizeof(struct ipw_sys_config)); | |
6233 | sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ | |
6234 | sys_config->answer_broadcast_ssid_probe = 0; | |
6235 | sys_config->accept_all_data_frames = 0; | |
6236 | sys_config->accept_non_directed_frames = 1; | |
6237 | sys_config->exclude_unicast_unencrypted = 0; | |
6238 | sys_config->disable_unicast_decryption = 1; | |
6239 | sys_config->exclude_multicast_unencrypted = 0; | |
6240 | sys_config->disable_multicast_decryption = 1; | |
6241 | sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH; | |
6242 | sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ | |
6243 | sys_config->dot11g_auto_detection = 0; | |
6244 | sys_config->enable_cts_to_self = 0; | |
6245 | sys_config->bt_coexist_collision_thr = 0; | |
6246 | sys_config->pass_noise_stats_to_host = 1; | |
6247 | } | |
6248 | ||
6249 | static int ipw_net_open(struct net_device *dev) | |
6250 | { | |
6251 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6252 | IPW_DEBUG_INFO("dev->open\n"); | |
6253 | /* we should be verifying the device is ready to be opened */ | |
6254 | if (!(priv->status & STATUS_RF_KILL_MASK) && | |
6255 | (priv->status & STATUS_ASSOCIATED)) | |
6256 | netif_start_queue(dev); | |
6257 | return 0; | |
6258 | } | |
6259 | ||
6260 | static int ipw_net_stop(struct net_device *dev) | |
6261 | { | |
6262 | IPW_DEBUG_INFO("dev->close\n"); | |
6263 | netif_stop_queue(dev); | |
6264 | return 0; | |
6265 | } | |
6266 | ||
6267 | /* | |
6268 | todo: | |
6269 | ||
6270 | modify to send one tfd per fragment instead of using chunking. otherwise | |
6271 | we need to heavily modify the ieee80211_skb_to_txb. | |
6272 | */ | |
6273 | ||
6274 | static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb) | |
6275 | { | |
6276 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) | |
6277 | txb->fragments[0]->data; | |
6278 | int i = 0; | |
6279 | struct tfd_frame *tfd; | |
6280 | struct clx2_tx_queue *txq = &priv->txq[0]; | |
6281 | struct clx2_queue *q = &txq->q; | |
6282 | u8 id, hdr_len, unicast; | |
6283 | u16 remaining_bytes; | |
6284 | ||
6285 | switch (priv->ieee->iw_mode) { | |
6286 | case IW_MODE_ADHOC: | |
6287 | hdr_len = IEEE80211_3ADDR_LEN; | |
6288 | unicast = !is_broadcast_ether_addr(hdr->addr1) && | |
6289 | !is_multicast_ether_addr(hdr->addr1); | |
6290 | id = ipw_find_station(priv, hdr->addr1); | |
6291 | if (id == IPW_INVALID_STATION) { | |
6292 | id = ipw_add_station(priv, hdr->addr1); | |
6293 | if (id == IPW_INVALID_STATION) { | |
6294 | IPW_WARNING("Attempt to send data to " | |
6295 | "invalid cell: " MAC_FMT "\n", | |
6296 | MAC_ARG(hdr->addr1)); | |
6297 | goto drop; | |
6298 | } | |
6299 | } | |
6300 | break; | |
6301 | ||
6302 | case IW_MODE_INFRA: | |
6303 | default: | |
6304 | unicast = !is_broadcast_ether_addr(hdr->addr3) && | |
6305 | !is_multicast_ether_addr(hdr->addr3); | |
6306 | hdr_len = IEEE80211_3ADDR_LEN; | |
6307 | id = 0; | |
6308 | break; | |
6309 | } | |
6310 | ||
6311 | tfd = &txq->bd[q->first_empty]; | |
6312 | txq->txb[q->first_empty] = txb; | |
6313 | memset(tfd, 0, sizeof(*tfd)); | |
6314 | tfd->u.data.station_number = id; | |
6315 | ||
6316 | tfd->control_flags.message_type = TX_FRAME_TYPE; | |
6317 | tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; | |
6318 | ||
6319 | tfd->u.data.cmd_id = DINO_CMD_TX; | |
6320 | tfd->u.data.len = txb->payload_size; | |
6321 | remaining_bytes = txb->payload_size; | |
6322 | if (unlikely(!unicast)) | |
6323 | tfd->u.data.tx_flags = DCT_FLAG_NO_WEP; | |
6324 | else | |
6325 | tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD; | |
6326 | ||
6327 | if (priv->assoc_request.ieee_mode == IPW_B_MODE) | |
6328 | tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK; | |
6329 | else | |
6330 | tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM; | |
6331 | ||
6332 | if (priv->config & CFG_PREAMBLE) | |
6333 | tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL; | |
6334 | ||
6335 | memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); | |
6336 | ||
6337 | /* payload */ | |
6338 | tfd->u.data.num_chunks = min((u8)(NUM_TFD_CHUNKS - 2), txb->nr_frags); | |
6339 | for (i = 0; i < tfd->u.data.num_chunks; i++) { | |
6340 | IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", | |
6341 | i, tfd->u.data.num_chunks, | |
6342 | txb->fragments[i]->len - hdr_len); | |
6343 | printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, | |
6344 | txb->fragments[i]->len - hdr_len); | |
6345 | ||
6346 | tfd->u.data.chunk_ptr[i] = pci_map_single( | |
6347 | priv->pci_dev, txb->fragments[i]->data + hdr_len, | |
6348 | txb->fragments[i]->len - hdr_len, PCI_DMA_TODEVICE); | |
6349 | tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len; | |
6350 | } | |
6351 | ||
6352 | if (i != txb->nr_frags) { | |
6353 | struct sk_buff *skb; | |
6354 | u16 remaining_bytes = 0; | |
6355 | int j; | |
6356 | ||
6357 | for (j = i; j < txb->nr_frags; j++) | |
6358 | remaining_bytes += txb->fragments[j]->len - hdr_len; | |
6359 | ||
6360 | printk(KERN_INFO "Trying to reallocate for %d bytes\n", | |
6361 | remaining_bytes); | |
6362 | skb = alloc_skb(remaining_bytes, GFP_ATOMIC); | |
6363 | if (skb != NULL) { | |
6364 | tfd->u.data.chunk_len[i] = remaining_bytes; | |
6365 | for (j = i; j < txb->nr_frags; j++) { | |
6366 | int size = txb->fragments[j]->len - hdr_len; | |
6367 | printk(KERN_INFO "Adding frag %d %d...\n", | |
6368 | j, size); | |
6369 | memcpy(skb_put(skb, size), | |
6370 | txb->fragments[j]->data + hdr_len, | |
6371 | size); | |
6372 | } | |
6373 | dev_kfree_skb_any(txb->fragments[i]); | |
6374 | txb->fragments[i] = skb; | |
6375 | tfd->u.data.chunk_ptr[i] = pci_map_single( | |
6376 | priv->pci_dev, skb->data, | |
6377 | tfd->u.data.chunk_len[i], PCI_DMA_TODEVICE); | |
6378 | tfd->u.data.num_chunks++; | |
6379 | } | |
6380 | } | |
6381 | ||
6382 | /* kick DMA */ | |
6383 | q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); | |
6384 | ipw_write32(priv, q->reg_w, q->first_empty); | |
6385 | ||
6386 | if (ipw_queue_space(q) < q->high_mark) | |
6387 | netif_stop_queue(priv->net_dev); | |
6388 | ||
6389 | return; | |
6390 | ||
6391 | drop: | |
6392 | IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); | |
6393 | ieee80211_txb_free(txb); | |
6394 | } | |
6395 | ||
6396 | static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, | |
6397 | struct net_device *dev) | |
6398 | { | |
6399 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6400 | unsigned long flags; | |
6401 | ||
6402 | IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); | |
6403 | ||
6404 | spin_lock_irqsave(&priv->lock, flags); | |
6405 | ||
6406 | if (!(priv->status & STATUS_ASSOCIATED)) { | |
6407 | IPW_DEBUG_INFO("Tx attempt while not associated.\n"); | |
6408 | priv->ieee->stats.tx_carrier_errors++; | |
6409 | netif_stop_queue(dev); | |
6410 | goto fail_unlock; | |
6411 | } | |
6412 | ||
6413 | ipw_tx_skb(priv, txb); | |
6414 | ||
6415 | spin_unlock_irqrestore(&priv->lock, flags); | |
6416 | return 0; | |
6417 | ||
6418 | fail_unlock: | |
6419 | spin_unlock_irqrestore(&priv->lock, flags); | |
6420 | return 1; | |
6421 | } | |
6422 | ||
6423 | static struct net_device_stats *ipw_net_get_stats(struct net_device *dev) | |
6424 | { | |
6425 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6426 | ||
6427 | priv->ieee->stats.tx_packets = priv->tx_packets; | |
6428 | priv->ieee->stats.rx_packets = priv->rx_packets; | |
6429 | return &priv->ieee->stats; | |
6430 | } | |
6431 | ||
6432 | static void ipw_net_set_multicast_list(struct net_device *dev) | |
6433 | { | |
6434 | ||
6435 | } | |
6436 | ||
6437 | static int ipw_net_set_mac_address(struct net_device *dev, void *p) | |
6438 | { | |
6439 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6440 | struct sockaddr *addr = p; | |
6441 | if (!is_valid_ether_addr(addr->sa_data)) | |
6442 | return -EADDRNOTAVAIL; | |
6443 | priv->config |= CFG_CUSTOM_MAC; | |
6444 | memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); | |
6445 | printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", | |
6446 | priv->net_dev->name, MAC_ARG(priv->mac_addr)); | |
6447 | ipw_adapter_restart(priv); | |
6448 | return 0; | |
6449 | } | |
6450 | ||
6451 | static void ipw_ethtool_get_drvinfo(struct net_device *dev, | |
6452 | struct ethtool_drvinfo *info) | |
6453 | { | |
6454 | struct ipw_priv *p = ieee80211_priv(dev); | |
6455 | char vers[64]; | |
6456 | char date[32]; | |
6457 | u32 len; | |
6458 | ||
6459 | strcpy(info->driver, DRV_NAME); | |
6460 | strcpy(info->version, DRV_VERSION); | |
6461 | ||
6462 | len = sizeof(vers); | |
6463 | ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); | |
6464 | len = sizeof(date); | |
6465 | ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); | |
6466 | ||
6467 | snprintf(info->fw_version, sizeof(info->fw_version),"%s (%s)", | |
6468 | vers, date); | |
6469 | strcpy(info->bus_info, pci_name(p->pci_dev)); | |
6470 | info->eedump_len = CX2_EEPROM_IMAGE_SIZE; | |
6471 | } | |
6472 | ||
6473 | static u32 ipw_ethtool_get_link(struct net_device *dev) | |
6474 | { | |
6475 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6476 | return (priv->status & STATUS_ASSOCIATED) != 0; | |
6477 | } | |
6478 | ||
6479 | static int ipw_ethtool_get_eeprom_len(struct net_device *dev) | |
6480 | { | |
6481 | return CX2_EEPROM_IMAGE_SIZE; | |
6482 | } | |
6483 | ||
6484 | static int ipw_ethtool_get_eeprom(struct net_device *dev, | |
6485 | struct ethtool_eeprom *eeprom, u8 *bytes) | |
6486 | { | |
6487 | struct ipw_priv *p = ieee80211_priv(dev); | |
6488 | ||
6489 | if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE) | |
6490 | return -EINVAL; | |
6491 | ||
6492 | memcpy(bytes, &((u8 *)p->eeprom)[eeprom->offset], eeprom->len); | |
6493 | return 0; | |
6494 | } | |
6495 | ||
6496 | static int ipw_ethtool_set_eeprom(struct net_device *dev, | |
6497 | struct ethtool_eeprom *eeprom, u8 *bytes) | |
6498 | { | |
6499 | struct ipw_priv *p = ieee80211_priv(dev); | |
6500 | int i; | |
6501 | ||
6502 | if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE) | |
6503 | return -EINVAL; | |
6504 | ||
6505 | memcpy(&((u8 *)p->eeprom)[eeprom->offset], bytes, eeprom->len); | |
6506 | for (i = IPW_EEPROM_DATA; | |
6507 | i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; | |
6508 | i++) | |
6509 | ipw_write8(p, i, p->eeprom[i]); | |
6510 | ||
6511 | return 0; | |
6512 | } | |
6513 | ||
6514 | static struct ethtool_ops ipw_ethtool_ops = { | |
6515 | .get_link = ipw_ethtool_get_link, | |
6516 | .get_drvinfo = ipw_ethtool_get_drvinfo, | |
6517 | .get_eeprom_len = ipw_ethtool_get_eeprom_len, | |
6518 | .get_eeprom = ipw_ethtool_get_eeprom, | |
6519 | .set_eeprom = ipw_ethtool_set_eeprom, | |
6520 | }; | |
6521 | ||
6522 | static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs) | |
6523 | { | |
6524 | struct ipw_priv *priv = data; | |
6525 | u32 inta, inta_mask; | |
6526 | ||
6527 | if (!priv) | |
6528 | return IRQ_NONE; | |
6529 | ||
6530 | spin_lock(&priv->lock); | |
6531 | ||
6532 | if (!(priv->status & STATUS_INT_ENABLED)) { | |
6533 | /* Shared IRQ */ | |
6534 | goto none; | |
6535 | } | |
6536 | ||
6537 | inta = ipw_read32(priv, CX2_INTA_RW); | |
6538 | inta_mask = ipw_read32(priv, CX2_INTA_MASK_R); | |
6539 | ||
6540 | if (inta == 0xFFFFFFFF) { | |
6541 | /* Hardware disappeared */ | |
6542 | IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n"); | |
6543 | goto none; | |
6544 | } | |
6545 | ||
6546 | if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) { | |
6547 | /* Shared interrupt */ | |
6548 | goto none; | |
6549 | } | |
6550 | ||
6551 | /* tell the device to stop sending interrupts */ | |
6552 | ipw_disable_interrupts(priv); | |
6553 | ||
6554 | /* ack current interrupts */ | |
6555 | inta &= (CX2_INTA_MASK_ALL & inta_mask); | |
6556 | ipw_write32(priv, CX2_INTA_RW, inta); | |
6557 | ||
6558 | /* Cache INTA value for our tasklet */ | |
6559 | priv->isr_inta = inta; | |
6560 | ||
6561 | tasklet_schedule(&priv->irq_tasklet); | |
6562 | ||
6563 | spin_unlock(&priv->lock); | |
6564 | ||
6565 | return IRQ_HANDLED; | |
6566 | none: | |
6567 | spin_unlock(&priv->lock); | |
6568 | return IRQ_NONE; | |
6569 | } | |
6570 | ||
6571 | static void ipw_rf_kill(void *adapter) | |
6572 | { | |
6573 | struct ipw_priv *priv = adapter; | |
6574 | unsigned long flags; | |
6575 | ||
6576 | spin_lock_irqsave(&priv->lock, flags); | |
6577 | ||
6578 | if (rf_kill_active(priv)) { | |
6579 | IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); | |
6580 | if (priv->workqueue) | |
6581 | queue_delayed_work(priv->workqueue, | |
6582 | &priv->rf_kill, 2 * HZ); | |
6583 | goto exit_unlock; | |
6584 | } | |
6585 | ||
6586 | /* RF Kill is now disabled, so bring the device back up */ | |
6587 | ||
6588 | if (!(priv->status & STATUS_RF_KILL_MASK)) { | |
6589 | IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " | |
6590 | "device\n"); | |
6591 | ||
6592 | /* we can not do an adapter restart while inside an irq lock */ | |
6593 | queue_work(priv->workqueue, &priv->adapter_restart); | |
6594 | } else | |
6595 | IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " | |
6596 | "enabled\n"); | |
6597 | ||
6598 | exit_unlock: | |
6599 | spin_unlock_irqrestore(&priv->lock, flags); | |
6600 | } | |
6601 | ||
6602 | static int ipw_setup_deferred_work(struct ipw_priv *priv) | |
6603 | { | |
6604 | int ret = 0; | |
6605 | ||
6606 | #ifdef CONFIG_SOFTWARE_SUSPEND2 | |
6607 | priv->workqueue = create_workqueue(DRV_NAME, 0); | |
6608 | #else | |
6609 | priv->workqueue = create_workqueue(DRV_NAME); | |
6610 | #endif | |
6611 | init_waitqueue_head(&priv->wait_command_queue); | |
6612 | ||
6613 | INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv); | |
6614 | INIT_WORK(&priv->associate, ipw_associate, priv); | |
6615 | INIT_WORK(&priv->disassociate, ipw_disassociate, priv); | |
6616 | INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv); | |
6617 | INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv); | |
6618 | INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv); | |
6619 | INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv); | |
6620 | INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv); | |
6621 | INIT_WORK(&priv->request_scan, | |
6622 | (void (*)(void *))ipw_request_scan, priv); | |
6623 | INIT_WORK(&priv->gather_stats, | |
6624 | (void (*)(void *))ipw_gather_stats, priv); | |
6625 | INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv); | |
6626 | INIT_WORK(&priv->roam, ipw_roam, priv); | |
6627 | INIT_WORK(&priv->scan_check, ipw_scan_check, priv); | |
6628 | ||
6629 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | |
6630 | ipw_irq_tasklet, (unsigned long)priv); | |
6631 | ||
6632 | return ret; | |
6633 | } | |
6634 | ||
6635 | ||
6636 | static void shim__set_security(struct net_device *dev, | |
6637 | struct ieee80211_security *sec) | |
6638 | { | |
6639 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6640 | int i; | |
6641 | ||
6642 | for (i = 0; i < 4; i++) { | |
6643 | if (sec->flags & (1 << i)) { | |
6644 | priv->sec.key_sizes[i] = sec->key_sizes[i]; | |
6645 | if (sec->key_sizes[i] == 0) | |
6646 | priv->sec.flags &= ~(1 << i); | |
6647 | else | |
6648 | memcpy(priv->sec.keys[i], sec->keys[i], | |
6649 | sec->key_sizes[i]); | |
6650 | priv->sec.flags |= (1 << i); | |
6651 | priv->status |= STATUS_SECURITY_UPDATED; | |
6652 | } | |
6653 | } | |
6654 | ||
6655 | if ((sec->flags & SEC_ACTIVE_KEY) && | |
6656 | priv->sec.active_key != sec->active_key) { | |
6657 | if (sec->active_key <= 3) { | |
6658 | priv->sec.active_key = sec->active_key; | |
6659 | priv->sec.flags |= SEC_ACTIVE_KEY; | |
6660 | } else | |
6661 | priv->sec.flags &= ~SEC_ACTIVE_KEY; | |
6662 | priv->status |= STATUS_SECURITY_UPDATED; | |
6663 | } | |
6664 | ||
6665 | if ((sec->flags & SEC_AUTH_MODE) && | |
6666 | (priv->sec.auth_mode != sec->auth_mode)) { | |
6667 | priv->sec.auth_mode = sec->auth_mode; | |
6668 | priv->sec.flags |= SEC_AUTH_MODE; | |
6669 | if (sec->auth_mode == WLAN_AUTH_SHARED_KEY) | |
6670 | priv->capability |= CAP_SHARED_KEY; | |
6671 | else | |
6672 | priv->capability &= ~CAP_SHARED_KEY; | |
6673 | priv->status |= STATUS_SECURITY_UPDATED; | |
6674 | } | |
6675 | ||
6676 | if (sec->flags & SEC_ENABLED && | |
6677 | priv->sec.enabled != sec->enabled) { | |
6678 | priv->sec.flags |= SEC_ENABLED; | |
6679 | priv->sec.enabled = sec->enabled; | |
6680 | priv->status |= STATUS_SECURITY_UPDATED; | |
6681 | if (sec->enabled) | |
6682 | priv->capability |= CAP_PRIVACY_ON; | |
6683 | else | |
6684 | priv->capability &= ~CAP_PRIVACY_ON; | |
6685 | } | |
6686 | ||
6687 | if (sec->flags & SEC_LEVEL && | |
6688 | priv->sec.level != sec->level) { | |
6689 | priv->sec.level = sec->level; | |
6690 | priv->sec.flags |= SEC_LEVEL; | |
6691 | priv->status |= STATUS_SECURITY_UPDATED; | |
6692 | } | |
6693 | ||
6694 | /* To match current functionality of ipw2100 (which works well w/ | |
6695 | * various supplicants, we don't force a disassociate if the | |
6696 | * privacy capability changes ... */ | |
6697 | #if 0 | |
6698 | if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) && | |
6699 | (((priv->assoc_request.capability & | |
6700 | WLAN_CAPABILITY_PRIVACY) && !sec->enabled) || | |
6701 | (!(priv->assoc_request.capability & | |
6702 | WLAN_CAPABILITY_PRIVACY) && sec->enabled))) { | |
6703 | IPW_DEBUG_ASSOC("Disassociating due to capability " | |
6704 | "change.\n"); | |
6705 | ipw_disassociate(priv); | |
6706 | } | |
6707 | #endif | |
6708 | } | |
6709 | ||
6710 | static int init_supported_rates(struct ipw_priv *priv, | |
6711 | struct ipw_supported_rates *rates) | |
6712 | { | |
6713 | /* TODO: Mask out rates based on priv->rates_mask */ | |
6714 | ||
6715 | memset(rates, 0, sizeof(*rates)); | |
6716 | /* configure supported rates */ | |
6717 | switch (priv->ieee->freq_band) { | |
6718 | case IEEE80211_52GHZ_BAND: | |
6719 | rates->ieee_mode = IPW_A_MODE; | |
6720 | rates->purpose = IPW_RATE_CAPABILITIES; | |
6721 | ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION, | |
6722 | IEEE80211_OFDM_DEFAULT_RATES_MASK); | |
6723 | break; | |
6724 | ||
6725 | default: /* Mixed or 2.4Ghz */ | |
6726 | rates->ieee_mode = IPW_G_MODE; | |
6727 | rates->purpose = IPW_RATE_CAPABILITIES; | |
6728 | ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION, | |
6729 | IEEE80211_CCK_DEFAULT_RATES_MASK); | |
6730 | if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) { | |
6731 | ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION, | |
6732 | IEEE80211_OFDM_DEFAULT_RATES_MASK); | |
6733 | } | |
6734 | break; | |
6735 | } | |
6736 | ||
6737 | return 0; | |
6738 | } | |
6739 | ||
6740 | static int ipw_config(struct ipw_priv *priv) | |
6741 | { | |
6742 | int i; | |
6743 | struct ipw_tx_power tx_power; | |
6744 | ||
6745 | memset(&priv->sys_config, 0, sizeof(priv->sys_config)); | |
6746 | memset(&tx_power, 0, sizeof(tx_power)); | |
6747 | ||
6748 | /* This is only called from ipw_up, which resets/reloads the firmware | |
6749 | so, we don't need to first disable the card before we configure | |
6750 | it */ | |
6751 | ||
6752 | /* configure device for 'G' band */ | |
6753 | tx_power.ieee_mode = IPW_G_MODE; | |
6754 | tx_power.num_channels = 11; | |
6755 | for (i = 0; i < 11; i++) { | |
6756 | tx_power.channels_tx_power[i].channel_number = i + 1; | |
6757 | tx_power.channels_tx_power[i].tx_power = priv->tx_power; | |
6758 | } | |
6759 | if (ipw_send_tx_power(priv, &tx_power)) | |
6760 | goto error; | |
6761 | ||
6762 | /* configure device to also handle 'B' band */ | |
6763 | tx_power.ieee_mode = IPW_B_MODE; | |
6764 | if (ipw_send_tx_power(priv, &tx_power)) | |
6765 | goto error; | |
6766 | ||
6767 | /* initialize adapter address */ | |
6768 | if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr)) | |
6769 | goto error; | |
6770 | ||
6771 | /* set basic system config settings */ | |
6772 | init_sys_config(&priv->sys_config); | |
6773 | if (ipw_send_system_config(priv, &priv->sys_config)) | |
6774 | goto error; | |
6775 | ||
6776 | init_supported_rates(priv, &priv->rates); | |
6777 | if (ipw_send_supported_rates(priv, &priv->rates)) | |
6778 | goto error; | |
6779 | ||
6780 | /* Set request-to-send threshold */ | |
6781 | if (priv->rts_threshold) { | |
6782 | if (ipw_send_rts_threshold(priv, priv->rts_threshold)) | |
6783 | goto error; | |
6784 | } | |
6785 | ||
6786 | if (ipw_set_random_seed(priv)) | |
6787 | goto error; | |
6788 | ||
6789 | /* final state transition to the RUN state */ | |
6790 | if (ipw_send_host_complete(priv)) | |
6791 | goto error; | |
6792 | ||
6793 | /* If configured to try and auto-associate, kick off a scan */ | |
6794 | if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv)) | |
6795 | goto error; | |
6796 | ||
6797 | return 0; | |
6798 | ||
6799 | error: | |
6800 | return -EIO; | |
6801 | } | |
6802 | ||
6803 | #define MAX_HW_RESTARTS 5 | |
6804 | static int ipw_up(struct ipw_priv *priv) | |
6805 | { | |
6806 | int rc, i; | |
6807 | ||
6808 | if (priv->status & STATUS_EXIT_PENDING) | |
6809 | return -EIO; | |
6810 | ||
6811 | for (i = 0; i < MAX_HW_RESTARTS; i++ ) { | |
6812 | /* Load the microcode, firmware, and eeprom. | |
6813 | * Also start the clocks. */ | |
6814 | rc = ipw_load(priv); | |
6815 | if (rc) { | |
6816 | IPW_ERROR("Unable to load firmware: 0x%08X\n", | |
6817 | rc); | |
6818 | return rc; | |
6819 | } | |
6820 | ||
6821 | ipw_init_ordinals(priv); | |
6822 | if (!(priv->config & CFG_CUSTOM_MAC)) | |
6823 | eeprom_parse_mac(priv, priv->mac_addr); | |
6824 | memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); | |
6825 | ||
6826 | if (priv->status & STATUS_RF_KILL_MASK) | |
6827 | return 0; | |
6828 | ||
6829 | rc = ipw_config(priv); | |
6830 | if (!rc) { | |
6831 | IPW_DEBUG_INFO("Configured device on count %i\n", i); | |
6832 | priv->notif_missed_beacons = 0; | |
6833 | netif_start_queue(priv->net_dev); | |
6834 | return 0; | |
6835 | } else { | |
6836 | IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", | |
6837 | rc); | |
6838 | } | |
6839 | ||
6840 | IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n", | |
6841 | i, MAX_HW_RESTARTS); | |
6842 | ||
6843 | /* We had an error bringing up the hardware, so take it | |
6844 | * all the way back down so we can try again */ | |
6845 | ipw_down(priv); | |
6846 | } | |
6847 | ||
6848 | /* tried to restart and config the device for as long as our | |
6849 | * patience could withstand */ | |
6850 | IPW_ERROR("Unable to initialize device after %d attempts.\n", | |
6851 | i); | |
6852 | return -EIO; | |
6853 | } | |
6854 | ||
6855 | static void ipw_down(struct ipw_priv *priv) | |
6856 | { | |
6857 | /* Attempt to disable the card */ | |
6858 | #if 0 | |
6859 | ipw_send_card_disable(priv, 0); | |
6860 | #endif | |
6861 | ||
6862 | /* tell the device to stop sending interrupts */ | |
6863 | ipw_disable_interrupts(priv); | |
6864 | ||
6865 | /* Clear all bits but the RF Kill */ | |
6866 | priv->status &= STATUS_RF_KILL_MASK; | |
6867 | ||
6868 | netif_carrier_off(priv->net_dev); | |
6869 | netif_stop_queue(priv->net_dev); | |
6870 | ||
6871 | ipw_stop_nic(priv); | |
6872 | } | |
6873 | ||
6874 | /* Called by register_netdev() */ | |
6875 | static int ipw_net_init(struct net_device *dev) | |
6876 | { | |
6877 | struct ipw_priv *priv = ieee80211_priv(dev); | |
6878 | ||
6879 | if (priv->status & STATUS_RF_KILL_SW) { | |
6880 | IPW_WARNING("Radio disabled by module parameter.\n"); | |
6881 | return 0; | |
6882 | } else if (rf_kill_active(priv)) { | |
6883 | IPW_WARNING("Radio Frequency Kill Switch is On:\n" | |
6884 | "Kill switch must be turned off for " | |
6885 | "wireless networking to work.\n"); | |
6886 | queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); | |
6887 | return 0; | |
6888 | } | |
6889 | ||
6890 | if (ipw_up(priv)) | |
6891 | return -EIO; | |
6892 | ||
6893 | return 0; | |
6894 | } | |
6895 | ||
6896 | /* PCI driver stuff */ | |
6897 | static struct pci_device_id card_ids[] = { | |
6898 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, | |
6899 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, | |
6900 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, | |
6901 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0}, | |
6902 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0}, | |
6903 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0}, | |
6904 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0}, | |
6905 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0}, | |
6906 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0}, | |
6907 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0}, | |
6908 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0}, | |
6909 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0}, | |
6910 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0}, | |
6911 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0}, | |
6912 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0}, | |
6913 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, | |
6914 | {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, | |
6915 | {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | |
6916 | {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */ | |
6917 | {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */ | |
6918 | {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */ | |
6919 | {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */ | |
6920 | ||
6921 | /* required last entry */ | |
6922 | {0,} | |
6923 | }; | |
6924 | ||
6925 | MODULE_DEVICE_TABLE(pci, card_ids); | |
6926 | ||
6927 | static struct attribute *ipw_sysfs_entries[] = { | |
6928 | &dev_attr_rf_kill.attr, | |
6929 | &dev_attr_direct_dword.attr, | |
6930 | &dev_attr_indirect_byte.attr, | |
6931 | &dev_attr_indirect_dword.attr, | |
6932 | &dev_attr_mem_gpio_reg.attr, | |
6933 | &dev_attr_command_event_reg.attr, | |
6934 | &dev_attr_nic_type.attr, | |
6935 | &dev_attr_status.attr, | |
6936 | &dev_attr_cfg.attr, | |
6937 | &dev_attr_dump_errors.attr, | |
6938 | &dev_attr_dump_events.attr, | |
6939 | &dev_attr_eeprom_delay.attr, | |
6940 | &dev_attr_ucode_version.attr, | |
6941 | &dev_attr_rtc.attr, | |
6942 | NULL | |
6943 | }; | |
6944 | ||
6945 | static struct attribute_group ipw_attribute_group = { | |
6946 | .name = NULL, /* put in device directory */ | |
6947 | .attrs = ipw_sysfs_entries, | |
6948 | }; | |
6949 | ||
6950 | static int ipw_pci_probe(struct pci_dev *pdev, | |
6951 | const struct pci_device_id *ent) | |
6952 | { | |
6953 | int err = 0; | |
6954 | struct net_device *net_dev; | |
6955 | void __iomem *base; | |
6956 | u32 length, val; | |
6957 | struct ipw_priv *priv; | |
6958 | int band, modulation; | |
6959 | ||
6960 | net_dev = alloc_ieee80211(sizeof(struct ipw_priv)); | |
6961 | if (net_dev == NULL) { | |
6962 | err = -ENOMEM; | |
6963 | goto out; | |
6964 | } | |
6965 | ||
6966 | priv = ieee80211_priv(net_dev); | |
6967 | priv->ieee = netdev_priv(net_dev); | |
6968 | priv->net_dev = net_dev; | |
6969 | priv->pci_dev = pdev; | |
6970 | #ifdef CONFIG_IPW_DEBUG | |
6971 | ipw_debug_level = debug; | |
6972 | #endif | |
6973 | spin_lock_init(&priv->lock); | |
6974 | ||
6975 | if (pci_enable_device(pdev)) { | |
6976 | err = -ENODEV; | |
6977 | goto out_free_ieee80211; | |
6978 | } | |
6979 | ||
6980 | pci_set_master(pdev); | |
6981 | ||
6982 | #define PCI_DMA_32BIT 0x00000000ffffffffULL | |
6983 | err = pci_set_dma_mask(pdev, PCI_DMA_32BIT); | |
6984 | if (!err) | |
6985 | err = pci_set_consistent_dma_mask(pdev, PCI_DMA_32BIT); | |
6986 | if (err) { | |
6987 | printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); | |
6988 | goto out_pci_disable_device; | |
6989 | } | |
6990 | ||
6991 | pci_set_drvdata(pdev, priv); | |
6992 | ||
6993 | err = pci_request_regions(pdev, DRV_NAME); | |
6994 | if (err) | |
6995 | goto out_pci_disable_device; | |
6996 | ||
6997 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | |
6998 | * PCI Tx retries from interfering with C3 CPU state */ | |
6999 | pci_read_config_dword(pdev, 0x40, &val); | |
7000 | if ((val & 0x0000ff00) != 0) | |
7001 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | |
7002 | ||
7003 | length = pci_resource_len(pdev, 0); | |
7004 | priv->hw_len = length; | |
7005 | ||
7006 | base = ioremap_nocache(pci_resource_start(pdev, 0), length); | |
7007 | if (!base) { | |
7008 | err = -ENODEV; | |
7009 | goto out_pci_release_regions; | |
7010 | } | |
7011 | ||
7012 | priv->hw_base = base; | |
7013 | IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length); | |
7014 | IPW_DEBUG_INFO("pci_resource_base = %p\n", base); | |
7015 | ||
7016 | err = ipw_setup_deferred_work(priv); | |
7017 | if (err) { | |
7018 | IPW_ERROR("Unable to setup deferred work\n"); | |
7019 | goto out_iounmap; | |
7020 | } | |
7021 | ||
7022 | /* Initialize module parameter values here */ | |
7023 | if (ifname) | |
7024 | strncpy(net_dev->name, ifname, IFNAMSIZ); | |
7025 | ||
7026 | if (associate) | |
7027 | priv->config |= CFG_ASSOCIATE; | |
7028 | else | |
7029 | IPW_DEBUG_INFO("Auto associate disabled.\n"); | |
7030 | ||
7031 | if (auto_create) | |
7032 | priv->config |= CFG_ADHOC_CREATE; | |
7033 | else | |
7034 | IPW_DEBUG_INFO("Auto adhoc creation disabled.\n"); | |
7035 | ||
7036 | if (disable) { | |
7037 | priv->status |= STATUS_RF_KILL_SW; | |
7038 | IPW_DEBUG_INFO("Radio disabled.\n"); | |
7039 | } | |
7040 | ||
7041 | if (channel != 0) { | |
7042 | priv->config |= CFG_STATIC_CHANNEL; | |
7043 | priv->channel = channel; | |
7044 | IPW_DEBUG_INFO("Bind to static channel %d\n", channel); | |
7045 | IPW_DEBUG_INFO("Bind to static channel %d\n", channel); | |
7046 | /* TODO: Validate that provided channel is in range */ | |
7047 | } | |
7048 | ||
7049 | switch (mode) { | |
7050 | case 1: | |
7051 | priv->ieee->iw_mode = IW_MODE_ADHOC; | |
7052 | break; | |
7053 | #ifdef CONFIG_IPW_PROMISC | |
7054 | case 2: | |
7055 | priv->ieee->iw_mode = IW_MODE_MONITOR; | |
7056 | break; | |
7057 | #endif | |
7058 | default: | |
7059 | case 0: | |
7060 | priv->ieee->iw_mode = IW_MODE_INFRA; | |
7061 | break; | |
7062 | } | |
7063 | ||
7064 | if ((priv->pci_dev->device == 0x4223) || | |
7065 | (priv->pci_dev->device == 0x4224)) { | |
7066 | printk(KERN_INFO DRV_NAME | |
7067 | ": Detected Intel PRO/Wireless 2915ABG Network " | |
7068 | "Connection\n"); | |
7069 | priv->ieee->abg_ture = 1; | |
7070 | band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND; | |
7071 | modulation = IEEE80211_OFDM_MODULATION | | |
7072 | IEEE80211_CCK_MODULATION; | |
7073 | priv->adapter = IPW_2915ABG; | |
7074 | priv->ieee->mode = IEEE_A|IEEE_G|IEEE_B; | |
7075 | } else { | |
7076 | if (priv->pci_dev->device == 0x4221) | |
7077 | printk(KERN_INFO DRV_NAME | |
7078 | ": Detected Intel PRO/Wireless 2225BG Network " | |
7079 | "Connection\n"); | |
7080 | else | |
7081 | printk(KERN_INFO DRV_NAME | |
7082 | ": Detected Intel PRO/Wireless 2200BG Network " | |
7083 | "Connection\n"); | |
7084 | ||
7085 | priv->ieee->abg_ture = 0; | |
7086 | band = IEEE80211_24GHZ_BAND; | |
7087 | modulation = IEEE80211_OFDM_MODULATION | | |
7088 | IEEE80211_CCK_MODULATION; | |
7089 | priv->adapter = IPW_2200BG; | |
7090 | priv->ieee->mode = IEEE_G|IEEE_B; | |
7091 | } | |
7092 | ||
7093 | priv->ieee->freq_band = band; | |
7094 | priv->ieee->modulation = modulation; | |
7095 | ||
7096 | priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK; | |
7097 | ||
7098 | priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; | |
7099 | priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; | |
7100 | ||
7101 | priv->rts_threshold = DEFAULT_RTS_THRESHOLD; | |
7102 | ||
7103 | /* If power management is turned on, default to AC mode */ | |
7104 | priv->power_mode = IPW_POWER_AC; | |
7105 | priv->tx_power = IPW_DEFAULT_TX_POWER; | |
7106 | ||
7107 | err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, | |
7108 | priv); | |
7109 | if (err) { | |
7110 | IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); | |
7111 | goto out_destroy_workqueue; | |
7112 | } | |
7113 | ||
7114 | SET_MODULE_OWNER(net_dev); | |
7115 | SET_NETDEV_DEV(net_dev, &pdev->dev); | |
7116 | ||
7117 | priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; | |
7118 | priv->ieee->set_security = shim__set_security; | |
7119 | ||
7120 | net_dev->open = ipw_net_open; | |
7121 | net_dev->stop = ipw_net_stop; | |
7122 | net_dev->init = ipw_net_init; | |
7123 | net_dev->get_stats = ipw_net_get_stats; | |
7124 | net_dev->set_multicast_list = ipw_net_set_multicast_list; | |
7125 | net_dev->set_mac_address = ipw_net_set_mac_address; | |
7126 | net_dev->get_wireless_stats = ipw_get_wireless_stats; | |
7127 | net_dev->wireless_handlers = &ipw_wx_handler_def; | |
7128 | net_dev->ethtool_ops = &ipw_ethtool_ops; | |
7129 | net_dev->irq = pdev->irq; | |
7130 | net_dev->base_addr = (unsigned long )priv->hw_base; | |
7131 | net_dev->mem_start = pci_resource_start(pdev, 0); | |
7132 | net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1; | |
7133 | ||
7134 | err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); | |
7135 | if (err) { | |
7136 | IPW_ERROR("failed to create sysfs device attributes\n"); | |
7137 | goto out_release_irq; | |
7138 | } | |
7139 | ||
7140 | err = register_netdev(net_dev); | |
7141 | if (err) { | |
7142 | IPW_ERROR("failed to register network device\n"); | |
7143 | goto out_remove_group; | |
7144 | } | |
7145 | ||
7146 | return 0; | |
7147 | ||
7148 | out_remove_group: | |
7149 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | |
7150 | out_release_irq: | |
7151 | free_irq(pdev->irq, priv); | |
7152 | out_destroy_workqueue: | |
7153 | destroy_workqueue(priv->workqueue); | |
7154 | priv->workqueue = NULL; | |
7155 | out_iounmap: | |
7156 | iounmap(priv->hw_base); | |
7157 | out_pci_release_regions: | |
7158 | pci_release_regions(pdev); | |
7159 | out_pci_disable_device: | |
7160 | pci_disable_device(pdev); | |
7161 | pci_set_drvdata(pdev, NULL); | |
7162 | out_free_ieee80211: | |
7163 | free_ieee80211(priv->net_dev); | |
7164 | out: | |
7165 | return err; | |
7166 | } | |
7167 | ||
7168 | static void ipw_pci_remove(struct pci_dev *pdev) | |
7169 | { | |
7170 | struct ipw_priv *priv = pci_get_drvdata(pdev); | |
7171 | if (!priv) | |
7172 | return; | |
7173 | ||
7174 | priv->status |= STATUS_EXIT_PENDING; | |
7175 | ||
7176 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | |
7177 | ||
7178 | ipw_down(priv); | |
7179 | ||
7180 | unregister_netdev(priv->net_dev); | |
7181 | ||
7182 | if (priv->rxq) { | |
7183 | ipw_rx_queue_free(priv, priv->rxq); | |
7184 | priv->rxq = NULL; | |
7185 | } | |
7186 | ipw_tx_queue_free(priv); | |
7187 | ||
7188 | /* ipw_down will ensure that there is no more pending work | |
7189 | * in the workqueue's, so we can safely remove them now. */ | |
7190 | if (priv->workqueue) { | |
7191 | cancel_delayed_work(&priv->adhoc_check); | |
7192 | cancel_delayed_work(&priv->gather_stats); | |
7193 | cancel_delayed_work(&priv->request_scan); | |
7194 | cancel_delayed_work(&priv->rf_kill); | |
7195 | cancel_delayed_work(&priv->scan_check); | |
7196 | destroy_workqueue(priv->workqueue); | |
7197 | priv->workqueue = NULL; | |
7198 | } | |
7199 | ||
7200 | free_irq(pdev->irq, priv); | |
7201 | iounmap(priv->hw_base); | |
7202 | pci_release_regions(pdev); | |
7203 | pci_disable_device(pdev); | |
7204 | pci_set_drvdata(pdev, NULL); | |
7205 | free_ieee80211(priv->net_dev); | |
7206 | ||
7207 | #ifdef CONFIG_PM | |
7208 | if (fw_loaded) { | |
7209 | release_firmware(bootfw); | |
7210 | release_firmware(ucode); | |
7211 | release_firmware(firmware); | |
7212 | fw_loaded = 0; | |
7213 | } | |
7214 | #endif | |
7215 | } | |
7216 | ||
7217 | ||
7218 | #ifdef CONFIG_PM | |
7219 | static int ipw_pci_suspend(struct pci_dev *pdev, u32 state) | |
7220 | { | |
7221 | struct ipw_priv *priv = pci_get_drvdata(pdev); | |
7222 | struct net_device *dev = priv->net_dev; | |
7223 | ||
7224 | printk(KERN_INFO "%s: Going into suspend...\n", dev->name); | |
7225 | ||
7226 | /* Take down the device; powers it off, etc. */ | |
7227 | ipw_down(priv); | |
7228 | ||
7229 | /* Remove the PRESENT state of the device */ | |
7230 | netif_device_detach(dev); | |
7231 | ||
7232 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) | |
7233 | pci_save_state(pdev, priv->pm_state); | |
7234 | #else | |
7235 | pci_save_state(pdev); | |
7236 | #endif | |
7237 | pci_disable_device(pdev); | |
7238 | pci_set_power_state(pdev, state); | |
7239 | ||
7240 | return 0; | |
7241 | } | |
7242 | ||
7243 | static int ipw_pci_resume(struct pci_dev *pdev) | |
7244 | { | |
7245 | struct ipw_priv *priv = pci_get_drvdata(pdev); | |
7246 | struct net_device *dev = priv->net_dev; | |
7247 | u32 val; | |
7248 | ||
7249 | printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name); | |
7250 | ||
7251 | pci_set_power_state(pdev, 0); | |
7252 | pci_enable_device(pdev); | |
7253 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) | |
7254 | pci_restore_state(pdev, priv->pm_state); | |
7255 | #else | |
7256 | pci_restore_state(pdev); | |
7257 | #endif | |
7258 | /* | |
7259 | * Suspend/Resume resets the PCI configuration space, so we have to | |
7260 | * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries | |
7261 | * from interfering with C3 CPU state. pci_restore_state won't help | |
7262 | * here since it only restores the first 64 bytes pci config header. | |
7263 | */ | |
7264 | pci_read_config_dword(pdev, 0x40, &val); | |
7265 | if ((val & 0x0000ff00) != 0) | |
7266 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | |
7267 | ||
7268 | /* Set the device back into the PRESENT state; this will also wake | |
7269 | * the queue of needed */ | |
7270 | netif_device_attach(dev); | |
7271 | ||
7272 | /* Bring the device back up */ | |
7273 | queue_work(priv->workqueue, &priv->up); | |
7274 | ||
7275 | return 0; | |
7276 | } | |
7277 | #endif | |
7278 | ||
7279 | /* driver initialization stuff */ | |
7280 | static struct pci_driver ipw_driver = { | |
7281 | .name = DRV_NAME, | |
7282 | .id_table = card_ids, | |
7283 | .probe = ipw_pci_probe, | |
7284 | .remove = __devexit_p(ipw_pci_remove), | |
7285 | #ifdef CONFIG_PM | |
7286 | .suspend = ipw_pci_suspend, | |
7287 | .resume = ipw_pci_resume, | |
7288 | #endif | |
7289 | }; | |
7290 | ||
7291 | static int __init ipw_init(void) | |
7292 | { | |
7293 | int ret; | |
7294 | ||
7295 | printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); | |
7296 | printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); | |
7297 | ||
7298 | ret = pci_module_init(&ipw_driver); | |
7299 | if (ret) { | |
7300 | IPW_ERROR("Unable to initialize PCI module\n"); | |
7301 | return ret; | |
7302 | } | |
7303 | ||
7304 | ret = driver_create_file(&ipw_driver.driver, | |
7305 | &driver_attr_debug_level); | |
7306 | if (ret) { | |
7307 | IPW_ERROR("Unable to create driver sysfs file\n"); | |
7308 | pci_unregister_driver(&ipw_driver); | |
7309 | return ret; | |
7310 | } | |
7311 | ||
7312 | return ret; | |
7313 | } | |
7314 | ||
7315 | static void __exit ipw_exit(void) | |
7316 | { | |
7317 | driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level); | |
7318 | pci_unregister_driver(&ipw_driver); | |
7319 | } | |
7320 | ||
7321 | module_param(disable, int, 0444); | |
7322 | MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); | |
7323 | ||
7324 | module_param(associate, int, 0444); | |
7325 | MODULE_PARM_DESC(associate, "auto associate when scanning (default on)"); | |
7326 | ||
7327 | module_param(auto_create, int, 0444); | |
7328 | MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); | |
7329 | ||
7330 | module_param(debug, int, 0444); | |
7331 | MODULE_PARM_DESC(debug, "debug output mask"); | |
7332 | ||
7333 | module_param(channel, int, 0444); | |
7334 | MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); | |
7335 | ||
7336 | module_param(ifname, charp, 0444); | |
7337 | MODULE_PARM_DESC(ifname, "network device name (default eth%d)"); | |
7338 | ||
7339 | #ifdef CONFIG_IPW_PROMISC | |
7340 | module_param(mode, int, 0444); | |
7341 | MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)"); | |
7342 | #else | |
7343 | module_param(mode, int, 0444); | |
7344 | MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); | |
7345 | #endif | |
7346 | ||
7347 | module_exit(ipw_exit); | |
7348 | module_init(ipw_init); |